Actual source code: iscoloring.c

  1: /*$Id: iscoloring.c,v 1.70 2001/06/21 21:15:55 bsmith Exp $*/

 3:  #include "petscsys.h"
 4:  #include "petscis.h"

  6: /*@C
  7:    ISColoringDestroy - Destroys a coloring context.

  9:    Collective on ISColoring

 11:    Input Parameter:
 12: .  iscoloring - the coloring context

 14:    Level: advanced

 16: .seealso: ISColoringView(), MatGetColoring()
 17: @*/
 18: int ISColoringDestroy(ISColoring iscoloring)
 19: {
 20:   int i,ierr;

 24:   if (--iscoloring->refct > 0) return(0);

 26:   if (iscoloring->is) {
 27:     for (i=0; i<iscoloring->n; i++) {
 28:       ISDestroy(iscoloring->is[i]);
 29:     }
 30:     PetscFree(iscoloring->is);
 31:   }
 32:   if (iscoloring->colors) {
 33:     PetscFree(iscoloring->colors);
 34:   }
 35:   PetscCommDestroy_Private(&iscoloring->comm);
 36:   PetscFree(iscoloring);
 37:   return(0);
 38: }
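
    As an illustration (not part of iscoloring.c): a minimal sketch of the usual create/destroy
    pairing, assuming it sits inside a routine that returns an int so CHKERRQ() can be used. The
    colors array handed to ISColoringCreate() is taken over by the coloring context and is freed
    by ISColoringDestroy(), not by the caller.

       int        ierr,i,n = 4,*colors;
       ISColoring coloring;

       ierr = PetscMalloc(n*sizeof(int),&colors);CHKERRQ(ierr);      /* must come from PetscMalloc() */
       for (i=0; i<n; i++) colors[i] = i % 2;                        /* two colors: 0 1 0 1 */
       ierr = ISColoringCreate(PETSC_COMM_WORLD,n,colors,&coloring);CHKERRQ(ierr);
       /* ... use the coloring ... */
       ierr = ISColoringDestroy(coloring);CHKERRQ(ierr);             /* frees the colors array too */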

 40: /*@C
 41:    ISColoringView - Views a coloring context.

 43:    Collective on ISColoring

 45:    Input Parameters:
 46: +  iscoloring - the coloring context
 47: -  viewer - the viewer

 49:    Level: advanced

 51: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatGetColoring()
 52: @*/
 53: int ISColoringView(ISColoring iscoloring,PetscViewer viewer)
 54: {
 55:   int        i,ierr;
 56:   PetscTruth isascii;
 57:   IS         *is;

 61:   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(iscoloring->comm);

 64:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
 65:   if (isascii) {
 66:     MPI_Comm comm;
 67:     int      rank;
 68:     PetscObjectGetComm((PetscObject)viewer,&comm);
 69:     MPI_Comm_rank(comm,&rank);
 70:     PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %d\n",rank,iscoloring->n);
 71:     PetscViewerFlush(viewer);
 72:   } else {
 73:     SETERRQ1(1,"Viewer type %s not supported for ISColoring",((PetscObject)viewer)->type_name);
 74:   }

 76:   ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
 77:   for (i=0; i<iscoloring->n; i++) {
 78:     ISView(iscoloring->is[i],viewer);
 79:   }
 80:   ISColoringRestoreIS(iscoloring,&is);
 81:   return(0);
 82: }
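
    For example (a sketch, reusing the hypothetical coloring from the example above): viewing on
    the standard-output viewer of a communicator. As the code shows, only ASCII viewers are
    supported, and a null viewer falls back to PETSC_VIEWER_STDOUT_ on the coloring's communicator.

       ierr = ISColoringView(coloring,PETSC_VIEWER_STDOUT_(PETSC_COMM_WORLD));CHKERRQ(ierr);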

 84: /*@C
 85:    ISColoringGetIS - Extracts index sets from the coloring context

 87:    Collective on ISColoring 

 89:    Input Parameter:
 90: .  iscoloring - the coloring context

 92:    Output Parameters:
 93: +  nn - number of index sets in the coloring context
 94: -  is - array of index sets

 96:    Level: advanced

 98: .seealso: ISColoringRestoreIS(), ISColoringView()
 99: @*/
100: int ISColoringGetIS(ISColoring iscoloring,int *nn,IS *isis[])
101: {


107:   if (nn)  *nn  = iscoloring->n;
108:   if (isis) {
109:     if (!iscoloring->is) {
110:       int *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
111:       int *colors = iscoloring->colors;
112:       IS  *is;
113: 
114:       /* generate the lists of nodes for each color */
115:       PetscMalloc((nc+1)*sizeof(int),&mcolors);
116:       PetscMemzero(mcolors,nc*sizeof(int));
117:       for (i=0; i<n; i++) {
118:         mcolors[colors[i]]++;
119:       }

121:       PetscMalloc((nc+1)*sizeof(int*),&ii);
122:       PetscMalloc((n+1)*sizeof(int),&ii[0]);
123:       for (i=1; i<nc; i++) {
124:         ii[i] = ii[i-1] + mcolors[i-1];
125:       }
126: 
127:       MPI_Scan(&iscoloring->N,&base,1,MPI_INT,MPI_SUM,iscoloring->comm);
128:       base -= iscoloring->N;
129:       PetscMemzero(mcolors,nc*sizeof(int));
130:       for (i=0; i<n; i++) {
131:         ii[colors[i]][mcolors[colors[i]]++] = i + base;
132:       }
133:       PetscMalloc((nc+1)*sizeof(IS),&is);
134:       for (i=0; i<nc; i++) {
135:         ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],is+i);
136:       }

138:       iscoloring->is   = is;
139:       PetscFree(ii[0]);
140:       PetscFree(ii);
141:       PetscFree(mcolors);
142:     }
143:     *isis = iscoloring->is;
144:   }

146:   return(0);
147: }
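
    To make the construction above concrete: on a process whose local colors are 0, 2, 0, 1 and
    whose base from the MPI_Scan() is 100, the routine builds color 0 -> {100, 102},
    color 1 -> {103}, color 2 -> {101}; the lists are packed into the contiguous workspace ii[0]
    and each one is then wrapped in an IS with ISCreateGeneral().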

149: /*@C
150:    ISColoringRestoreIS - Restores the index sets extracted from the coloring context

152:    Collective on ISColoring 

154:    Input Parameters:
155: +  iscoloring - the coloring context
156: -  is - array of index sets

158:    Level: advanced

160: .seealso: ISColoringGetIS(), ISColoringView()
161: @*/
162: int ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
163: {
166: 
167:   /* currently nothing is done here */

169:   return(0);
170: }
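
    A sketch of the Get/Restore pairing (assuming the coloring from the earlier example; nis and
    isets are illustrative names):

       int i,nis;
       IS  *isets;

       ierr = ISColoringGetIS(coloring,&nis,&isets);CHKERRQ(ierr);
       for (i=0; i<nis; i++) {
         ierr = ISView(isets[i],PETSC_VIEWER_STDOUT_(PETSC_COMM_WORLD));CHKERRQ(ierr);
       }
       ierr = ISColoringRestoreIS(coloring,&isets);CHKERRQ(ierr);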


173: /*@C
174:     ISColoringCreate - Generates an ISColoring context from lists (provided 
175:     by each processor) of colors for each node.

177:     Collective on MPI_Comm

179:     Input Parameters:
180: +   comm - communicator for the processors creating the coloring
181: .   n - number of nodes on this processor
182: -   colors - array containing the colors for this processor; color
183:              numbers begin at 0. In C/C++ this array must have been obtained with PetscMalloc()
184:              and should NOT be freed (ISColoringDestroy() will free it).

186:     Output Parameter:
187: .   iscoloring - the resulting coloring data structure

189:     Options Database Key:
190: .   -is_coloring_view - Activates ISColoringView()

192:    Level: advanced
193:    
194:     Notes: By default the coloring type is set to IS_COLORING_LOCAL

196: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()

198: @*/
199: int ISColoringCreate(MPI_Comm comm,int n,const int colors[],ISColoring *iscoloring)
200: {
201:   int        ierr,size,rank,base,top,tag,nc,ncwork,i;
202:   PetscTruth flg;
203:   MPI_Status status;

206:   PetscNew(struct _p_ISColoring,iscoloring);
207:   PetscCommDuplicate_Private(comm,&(*iscoloring)->comm,&tag);
208:   comm = (*iscoloring)->comm;

210:   /* compute the number of the first node on my processor */
211:   MPI_Comm_size(comm,&size);

213:   /* should use MPI_Scan() */
214:   MPI_Comm_rank(comm,&rank);
215:   if (!rank) {
216:     base = 0;
217:     top  = n;
218:   } else {
219:     MPI_Recv(&base,1,MPI_INT,rank-1,tag,comm,&status);
220:     top = base+n;
221:   }
222:   if (rank < size-1) {
223:     MPI_Send(&top,1,MPI_INT,rank+1,tag,comm);
224:   }

226:   /* compute the total number of colors */
227:   ncwork = 0;
228:   for (i=0; i<n; i++) {
229: #if defined(PETSC_USE_BOPT_g)
230:     if (colors[i] < 0) SETERRQ2(1,"Colors must be 0 or greater, you have given %d at %d",colors[i],i);
231: #endif    
232:     if (ncwork < colors[i]) ncwork = colors[i];
233:   }
234:   ncwork++;
235:   MPI_Allreduce(&ncwork,&nc,1,MPI_INT,MPI_MAX,comm);
236:   (*iscoloring)->n      = nc;
237:   (*iscoloring)->is     = 0;
238:   (*iscoloring)->colors = (int *)colors;
239:   (*iscoloring)->N      = n;
240:   (*iscoloring)->refct  = 1;
241:   (*iscoloring)->ctype  = IS_COLORING_LOCAL;

243:   PetscOptionsHasName(PETSC_NULL,"-is_coloring_view",&flg);
244:   if (flg) {
245:     ISColoringView(*iscoloring,PETSC_VIEWER_STDOUT_((*iscoloring)->comm));
246:   }
247:   PetscLogInfo(0,"ISColoringCreate: Number of colors %d\n",nc);
248:   return(0);
249: }
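
    A sketch of a distributed coloring (rank, n, comm, colors, coloring, i and ierr are assumed to
    be set up by the caller, as in the earlier example): each process may use a different number of
    colors locally; the MPI_Allreduce() above makes every process agree on the global count, and
    the -is_coloring_view option prints the result.

       ierr = PetscMalloc(n*sizeof(int),&colors);CHKERRQ(ierr);
       for (i=0; i<n; i++) colors[i] = (rank + i) % 3;       /* at most three colors globally */
       ierr = ISColoringCreate(comm,n,colors,&coloring);CHKERRQ(ierr);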

251: /*@C
252:     ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
253:     generates an IS that contains a new global node number for each index based
254:     on the partitioning.

256:     Collective on IS

258:     Input Parameter:
259: .   partitioning - a partitioning as generated by MatPartitioningApply()

261:     Output Parameter:
262: .   is - on each processor the index set that defines the global numbers 
263:          (in the new numbering) for all the nodes currently (before the partitioning) 
264:          on that processor

266:    Level: advanced

268: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()

270: @*/
271: int ISPartitioningToNumbering(IS part,IS *is)
272: {
273:   MPI_Comm comm;
274:   int      i,ierr,size,*indices,np,n,*starts,*sums,*lsizes,*newi;

277:   PetscObjectGetComm((PetscObject)part,&comm);
278:   MPI_Comm_size(comm,&size);

280:   /* count the number of partitions, make sure <= size */
281:   ISGetLocalSize(part,&n);
282:   ISGetIndices(part,&indices);
283:   np = 0;
284:   for (i=0; i<n; i++) {
285:     np = PetscMax(np,indices[i]);
286:   }
287:   if (np >= size) {
288:     SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Number of partitions %d larger than number of processors %d",np,size);
289:   }

291:   /*
292:         lsizes - number of elements of each partition on this particular processor
293:         sums - total number of "previous" nodes for any particular partition
294:         starts - global number of first element in each partition on this processor
295:   */
296:   ierr   = PetscMalloc(3*size*sizeof(int),&lsizes);
297:   starts = lsizes + size;
298:   sums   = starts + size;
299:   ierr   = PetscMemzero(lsizes,size*sizeof(int));
300:   for (i=0; i<n; i++) {
301:     lsizes[indices[i]]++;
302:   }
303:   MPI_Allreduce(lsizes,sums,size,MPI_INT,MPI_SUM,comm);
304:   MPI_Scan(lsizes,starts,size,MPI_INT,MPI_SUM,comm);
305:   for (i=0; i<size; i++) {
306:     starts[i] -= lsizes[i];
307:   }
308:   for (i=1; i<size; i++) {
309:     sums[i]    += sums[i-1];
310:     starts[i]  += sums[i-1];
311:   }

313:   /* 
314:       For each local index give it the new global number
315:   */
316:   PetscMalloc((n+1)*sizeof(int),&newi);
317:   for (i=0; i<n; i++) {
318:     newi[i] = starts[indices[i]]++;
319:   }
320:   PetscFree(lsizes);

322:   ISRestoreIndices(part,&indices);
323:   ISCreateGeneral(comm,n,newi,is);
324:   PetscFree(newi);
325:   ISSetPermutation(*is);
326:   return(0);
327: }
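
    A usage sketch (part and comm are assumed: part would typically be the partitioning IS produced
    by MatPartitioningApply(), as the manual page above notes):

       IS newnumbering;

       ierr = ISPartitioningToNumbering(part,&newnumbering);CHKERRQ(ierr);
       ierr = ISView(newnumbering,PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
       ierr = ISDestroy(newnumbering);CHKERRQ(ierr);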

329: /*@C
330:     ISPartitioningCount - Takes an ISPartitioning and determines the number of
331:     resulting elements on each processor

333:     Collective on IS

335:     Input Parameters:
336: .   partitioning - a partitioning as generated by MatPartitioningApply()

338:     Output Parameter:
339: .   count - array of length size of communicator associated with IS, contains 
340:            the number of elements assigned to each processor

342:    Level: advanced

344: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering()

346: @*/
347: int ISPartitioningCount(IS part,int count[])
348: {
349:   MPI_Comm comm;
350:   int      i,ierr,size,*indices,np,n,*lsizes;

353:   PetscObjectGetComm((PetscObject)part,&comm);
354:   MPI_Comm_size(comm,&size);

356:   /* count the number of partitions, make sure <= size */
357:   ISGetLocalSize(part,&n);
358:   ISGetIndices(part,&indices);
359:   np = 0;
360:   for (i=0; i<n; i++) {
361:     np = PetscMax(np,indices[i]);
362:   }
363:   if (np >= size) {
364:     SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Number of partitions %d larger than number of processors %d",np,size);
365:   }

367:   /*
368:         lsizes - number of elements of each partition on this particular processor
369:   */
372:   PetscMalloc(size*sizeof(int),&lsizes);
373:   ierr   = PetscMemzero(lsizes,size*sizeof(int));
374:   for (i=0; i<n; i++) {
375:     lsizes[indices[i]]++;
376:   }
377:   ISRestoreIndices(part,&indices);
378:   MPI_Allreduce(lsizes,count,size,MPI_INT,MPI_SUM,comm);
379:   PetscFree(lsizes);

381:   return(0);
382: }
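
    A sketch (part and comm assumed as above); the count array must have one entry per process in
    the communicator associated with the IS:

       int size,*count;

       ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
       ierr = PetscMalloc(size*sizeof(int),&count);CHKERRQ(ierr);
       ierr = ISPartitioningCount(part,count);CHKERRQ(ierr);
       /* count[p] = number of elements the partitioning assigns to process p */
       ierr = PetscFree(count);CHKERRQ(ierr);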

384: /*@C
385:     ISAllGather - Given an index set (IS) on each processor, generates a large 
386:     index set (same on each processor) by concatenating together each
387:     processor's index set.

389:     Collective on IS

391:     Input Parameter:
392: .   is - the distributed index set

394:     Output Parameter:
395: .   isout - the concatenated index set (same on all processors)

397:     Notes: 
398:     ISAllGather() is clearly not scalable for large index sets.

400:     The IS created on each processor must be created with a common
401:     communicator (e.g., PETSC_COMM_WORLD). If the index sets were created 
402:     with PETSC_COMM_SELF, this routine will not work as expected, since 
403:     each process will generate its own new IS that consists only of
404:     itself.

406:     Level: intermediate

408:     Concepts: gather^index sets
409:     Concepts: index sets^gathering to all processors
410:     Concepts: IS^gathering to all processors

412: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGatherIndices()
413: @*/
414: int ISAllGather(IS is,IS *isout)
415: {
416:   int      *indices,*sizes,size,*offsets,n,*lindices,i,N,ierr;
417:   MPI_Comm comm;


422:   PetscObjectGetComm((PetscObject)is,&comm);
423:   MPI_Comm_size(comm,&size);
424:   PetscMalloc(2*size*sizeof(int),&sizes);
425:   offsets = sizes + size;
426: 
427:   ISGetLocalSize(is,&n);
428:   MPI_Allgather(&n,1,MPI_INT,sizes,1,MPI_INT,comm);
429:   offsets[0] = 0;
430:   for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
431:   N = offsets[size-1] + sizes[size-1];

433:   PetscMalloc((N+1)*sizeof(int),&indices);
434:   ISGetIndices(is,&lindices);
435:   MPI_Allgatherv(lindices,n,MPI_INT,indices,sizes,offsets,MPI_INT,comm);
436:   ISRestoreIndices(is,&lindices);

438:   ISCreateGeneral(PETSC_COMM_SELF,N,indices,isout);
439:   PetscFree(indices);

441:   PetscFree(sizes);
442:   return(0);
443: }
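
    A sketch (rank and n assumed set): each process contributes a stride of n indices on
    PETSC_COMM_WORLD; note that, as the code above shows, the gathered IS is created on
    PETSC_COMM_SELF.

       IS local,all;

       ierr = ISCreateStride(PETSC_COMM_WORLD,n,rank*n,1,&local);CHKERRQ(ierr);
       ierr = ISAllGather(local,&all);CHKERRQ(ierr);   /* 'all' holds 0,...,size*n-1 on every process */
       ierr = ISDestroy(local);CHKERRQ(ierr);
       ierr = ISDestroy(all);CHKERRQ(ierr);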

445: /*@C
446:     ISAllGatherIndices - Given a set of integers on each processor, generates a large
447:     set (same on each processor) by concatenating together each processor's integers.

449:     Collective on MPI_Comm

451:     Input Parameters:
452: +   comm - communicator to share the indices
453: .   n - local size of set
454: -   lindices - local indices

456:     Output Parameters:
457: +   outN - total number of indices
458: -   outindices - all of the integers

460:     Notes: 
461:     ISAllGatherIndices() is clearly not scalable for large index sets.


464:     Level: intermediate

466:     Concepts: gather^index sets
467:     Concepts: index sets^gathering to all processors
468:     Concepts: IS^gathering to all processors

470: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
471: @*/
472: int ISAllGatherIndices(MPI_Comm comm,int n,int *lindices,int *outN,int **outindices)
473: {
474:   int *indices,*sizes,size,*offsets,i,N,ierr;

477:   MPI_Comm_size(comm,&size);
478:   PetscMalloc(2*size*sizeof(int),&sizes);
479:   offsets = sizes + size;
480: 
481:   MPI_Allgather(&n,1,MPI_INT,sizes,1,MPI_INT,comm);
482:   offsets[0] = 0;
483:   for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
484:   N    = offsets[size-1] + sizes[size-1];

487:   PetscMalloc((N+1)*sizeof(int),&indices);
488:   MPI_Allgatherv(lindices,n,MPI_INT,indices,sizes,offsets,MPI_INT,comm);
489:   PetscFree(sizes); /* sizes and offsets share this allocation, so free only after the gather */

490:   *outindices = indices;
491:   if (outN) *outN = N;
492:   return(0);
493: }
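
    And the corresponding sketch for raw integer arrays (rank assumed set). The returned array is
    PetscMalloc()ed inside the routine, so the caller is responsible for freeing it:

       int n = 2,lindices[2],N,*gathered;

       lindices[0] = 2*rank; lindices[1] = 2*rank + 1;
       ierr = ISAllGatherIndices(PETSC_COMM_WORLD,n,lindices,&N,&gathered);CHKERRQ(ierr);
       /* gathered[] now holds 2*size entries, identical on every process */
       ierr = PetscFree(gathered);CHKERRQ(ierr);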