Actual source code: isltog.c

  1: #define PETSCVEC_DLL

  3:  #include "petscvec.h"
  4:  #include "src/vec/is/isimpl.h"

  6: PetscCookie  IS_LTOGM_COOKIE = -1;

 10: /*@C
 11:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 13:     Not Collective

 15:     Input Parameter:
 16: .   ltog - local to global mapping

 18:     Output Parameter:
 19: .   n - the number of entries in the local mapping

 21:     Level: advanced

 23:     Concepts: mapping^local to global

 25: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 26: @*/
 27: PetscErrorCode  ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
 28: {
 32:   *n = mapping->n;
 33:   return(0);
 34: }

 38: /*@C
 39:     ISLocalToGlobalMappingView - View a local to global mapping

 41:     Not Collective

 43:     Input Parameters:
 44: +   ltog - local to global mapping
 45: -   viewer - viewer

 47:     Level: advanced

 49:     Concepts: mapping^local to global

 51: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 52: @*/
 53: PetscErrorCode  ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 54: {
 55:   PetscInt        i;
 56:   PetscMPIInt     rank;
 57:   PetscTruth      iascii;
 58:   PetscErrorCode  ierr;

 62:   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);

 65:   MPI_Comm_rank(mapping->comm,&rank);
 66:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
 67:   if (iascii) {
 68:     for (i=0; i<mapping->n; i++) {
 69:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 70:     }
 71:     PetscViewerFlush(viewer);
 72:   } else {
 73:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 74:   }

 76:   return(0);
 77: }
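/*
   Usage sketch (editorial addition, not part of the original isltog.c): printing a mapping with
   the default ASCII viewer. The variable name ltog is illustrative; as the code above shows, only
   ASCII viewers are supported, and passing a null viewer selects PETSC_VIEWER_STDOUT_ on the
   mapping's communicator.

      ISLocalToGlobalMapping ltog;      already created, e.g. with ISLocalToGlobalMappingCreate()
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
*/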

 81: /*@
 82:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 83:     ordering and a global parallel ordering.

 85:     Not collective

 87:     Input Parameter:
 88: .   is - index set containing the global number for each local index

 90:     Output Parameter:
 91: .   mapping - new mapping data structure

 93:     Level: advanced

 95:     Concepts: mapping^local to global

 97: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 98: @*/
 99: PetscErrorCode  ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100: {
102:   PetscInt      n,*indices;
103:   MPI_Comm comm;


109:   PetscObjectGetComm((PetscObject)is,&comm);
110:   ISGetLocalSize(is,&n);
111:   ISGetIndices(is,&indices);
112:   ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
113:   ISRestoreIndices(is,&indices);

115:   return(0);
116: }
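/*
   Usage sketch (editorial addition, not part of the original isltog.c): building a mapping from an
   IS that lists the global number of each local entry. Names and index values are illustrative;
   error handling is abbreviated.

      IS                     is;
      ISLocalToGlobalMapping ltog;
      PetscInt               gidx[] = {0,1,2,6,7,8};        global numbers of the 6 local entries
      PetscErrorCode         ierr;

      ierr = ISCreateGeneral(PETSC_COMM_SELF,6,gidx,&is);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr);
      ierr = ISDestroy(is);CHKERRQ(ierr);                   the mapping keeps its own copy of the indices
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/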


121: /*@
122:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
123:     ordering and a global parallel ordering.

125:     Not Collective, but communicator may have more than one process

127:     Input Parameters:
128: +   comm - MPI communicator
129: .   n - the number of local elements
130: -   indices - the global index for each local element

132:     Output Parameter:
133: .   mapping - new mapping data structure

135:     Level: advanced

137:     Concepts: mapping^local to global

139: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
140: @*/
141: PetscErrorCode  ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
142: {
144:   PetscInt       *in;

149:   PetscMalloc(n*sizeof(PetscInt),&in);
150:   PetscMemcpy(in,indices,n*sizeof(PetscInt));
151:   ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);
152:   return(0);
153: }
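/*
   Usage sketch (editorial addition, not part of the original isltog.c): creating a mapping directly
   from an array of global indices and querying its size. Variable names and values are illustrative.

      ISLocalToGlobalMapping ltog;
      PetscInt               gidx[] = {3,4,5,9,10,11};      global number of each local entry
      PetscInt               nlocal;
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,6,gidx,&ltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingGetSize(ltog,&nlocal);CHKERRQ(ierr);   nlocal is now 6
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);

   Because ISLocalToGlobalMappingCreate() copies the indices (see above), gidx may live on the stack
   and may be modified or freed after the call.
*/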

157: /*@C
158:     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
159:     ordering and a global parallel ordering.

161:     Not Collective, but communicator may have more than one process

163:     Input Parameters:
164: +   comm - MPI communicator
165: .   n - the number of local elements
166: -   indices - the global index for each local element

168:     Output Parameter:
169: .   mapping - new mapping data structure

171:     Level: developer

173:     Notes: Does not copy the indices; it keeps only the pointer to them. ISLocalToGlobalMappingDestroy()
174:     will free the space, so the array must be obtained with PetscMalloc() and must not be freed elsewhere.

176:     Concepts: mapping^local to global

178: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
179: @*/
180: PetscErrorCode  ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
181: {

185:   if (n) {
187:   }
189:   *mapping = PETSC_NULL;
190: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
191:   VecInitializePackage(PETSC_NULL);
192: #endif
193:   if (IS_LTOGM_COOKIE == -1) {
194:     PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");
195:   }

197:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
198:                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
199:   PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(PetscInt));

201:   (*mapping)->n       = n;
202:   (*mapping)->indices = (PetscInt*)indices;

204:   /*
205:       Do not create the global to local mapping. This is only created if 
206:      ISGlobalToLocalMapping() is called 
207:   */
208:   (*mapping)->globals = 0;
209:   return(0);
210: }
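/*
   Usage sketch (editorial addition, not part of the original isltog.c): the no-copy variant. The
   index array must come from PetscMalloc(); ownership passes to the mapping, which frees it in
   ISLocalToGlobalMappingDestroy(), so the caller must not free it. Names are illustrative.

      ISLocalToGlobalMapping ltog;
      PetscInt               *gidx,i,n = 100;
      PetscErrorCode         ierr;

      ierr = PetscMalloc(n*sizeof(PetscInt),&gidx);CHKERRQ(ierr);
      for (i=0; i<n; i++) gidx[i] = 1000 + i;               fill in the global numbers
      ierr = ISLocalToGlobalMappingCreateNC(PETSC_COMM_WORLD,n,gidx,&ltog);CHKERRQ(ierr);
      do NOT call PetscFree(gidx) here; the mapping now owns the array
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/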

214: /*@
215:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
216:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
217:        and VecSetLocalToGlobalMappingBlock().

219:     Not Collective, but communicator may have more than one process

221:     Input Parameters:
222: +    inmap - original point-wise mapping
223: -    bs - block size

225:     Output Parameter:
226: .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.

228:     Level: advanced

230:     Concepts: mapping^local to global

232: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
233: @*/
234: PetscErrorCode  ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
235: {
237:   PetscInt       *ii,i,n;

240:   if (bs > 1) {
241:     n    = inmap->n/bs;
242:     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
243:     PetscMalloc(n*sizeof(PetscInt),&ii);
244:     for (i=0; i<n; i++) {
245:       ii[i] = inmap->indices[bs*i]/bs;
246:     }
247:     ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);
248:     PetscFree(ii);
249:   } else {
250:     *outmap = inmap;
251:     PetscObjectReference((PetscObject)inmap);
252:   }
253:   return(0);
254: }
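/*
   Usage sketch (editorial addition, not part of the original isltog.c): deriving a block mapping
   from a pointwise one. With block size 2 the pointwise indices {4,5,10,11} collapse to the block
   indices {2,5}. Names are illustrative.

      ISLocalToGlobalMapping ltog,bltog;
      PetscInt               gidx[] = {4,5,10,11};          pointwise indices, length divisible by bs
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,gidx,&ltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingBlock(ltog,2,&bltog);CHKERRQ(ierr);    bltog maps 2 blocks: 2 and 5
      ierr = ISLocalToGlobalMappingDestroy(bltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/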
255: 
258: /*@
259:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
260:    ordering and a global parallel ordering.

262:    Not Collective

264:    Input Parameter:
265: .  mapping - mapping data structure

267:    Level: advanced

269: .seealso: ISLocalToGlobalMappingCreate()
270: @*/
271: PetscErrorCode  ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
272: {
276:   if (--mapping->refct > 0) return(0);
277:   if (mapping->refct < 0) {
278:     SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
279:   }

281:   PetscFree(mapping->indices);
282:   PetscFree(mapping->globals);
283:   PetscHeaderDestroy(mapping);
284:   return(0);
285: }
286: 
289: /*@
290:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
291:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
292:     context.

294:     Not collective

296:     Input Parameters:
297: +   mapping - mapping between local and global numbering
298: -   is - index set in local numbering

300:     Output Parameter:
301: .   newis - index set in global numbering

303:     Level: advanced

305:     Concepts: mapping^local to global

307: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
308:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
309: @*/
310: PetscErrorCode  ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
311: {
313:   PetscInt            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;


320:   ISGetLocalSize(is,&n);
321:   ISGetIndices(is,&idxin);
322:   idxmap = mapping->indices;
323: 
324:   PetscMalloc(n*sizeof(PetscInt),&idxout);
325:   for (i=0; i<n; i++) {
326:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
327:     idxout[i] = idxmap[idxin[i]];
328:   }
329:   ISRestoreIndices(is,&idxin);
330:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);
331:   PetscFree(idxout);
332:   return(0);
333: }
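/*
   Usage sketch (editorial addition, not part of the original isltog.c): translating an index set
   given in the local numbering into the global numbering. With the mapping {7,8,9,12} the local
   IS {0,3} becomes the global IS {7,12}. Names are illustrative.

      ISLocalToGlobalMapping ltog;
      IS                     islocal,isglobal;
      PetscInt               gidx[] = {7,8,9,12},loc[] = {0,3};
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,gidx,&ltog);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,2,loc,&islocal);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);CHKERRQ(ierr);
      ierr = ISView(isglobal,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);     prints 7 and 12
      ierr = ISDestroy(isglobal);CHKERRQ(ierr);
      ierr = ISDestroy(islocal);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/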

335: /*MC
336:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
337:    and converts them to the global numbering.

339:    Not collective

341:    Input Parameters:
342: +  mapping - the local to global mapping context
343: .  N - number of integers
344: -  in - input indices in local numbering

346:    Output Parameter:
347: .  out - indices in global numbering

349:    Synopsis:
350:    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

352:    Notes: 
353:    The in and out array parameters may be identical.

355:    Level: advanced

357: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
358:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
359:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

361:     Concepts: mapping^local to global

363: M*/
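/*
   Usage sketch (editorial addition, not part of the original isltog.c): converting a short list of
   local indices to global indices in place with ISLocalToGlobalMappingApply(), which is a macro in
   this version (see the synopsis above); error handling is written as for an ordinary PETSc call.
   With the mapping {30,31,40,41} the local indices {1,2} become {31,40}.

      ISLocalToGlobalMapping ltog;
      PetscInt               gidx[] = {30,31,40,41},idx[] = {1,2};
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,gidx,&ltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApply(ltog,2,idx,idx);CHKERRQ(ierr);   in and out may be identical
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/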

365: /* -----------------------------------------------------------------------------------------*/

369: /*
370:     Creates the global fields in the ISLocalToGlobalMapping structure
371: */
372: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
373: {
375:   PetscInt            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

378:   end   = 0;
379:   start = 100000000;

381:   for (i=0; i<n; i++) {
382:     if (idx[i] < 0) continue;
383:     if (idx[i] < start) start = idx[i];
384:     if (idx[i] > end)   end   = idx[i];
385:   }
386:   if (start > end) {start = 0; end = -1;}
387:   mapping->globalstart = start;
388:   mapping->globalend   = end;

390:   PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);
391:   mapping->globals = globals;
392:   for (i=0; i<end-start+1; i++) {
393:     globals[i] = -1;
394:   }
395:   for (i=0; i<n; i++) {
396:     if (idx[i] < 0) continue;
397:     globals[idx[i] - start] = i;
398:   }

400:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
401:   return(0);
402: }
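/*
   Worked example (editorial addition, not part of the original isltog.c): for a mapping with
   indices {9,2,4,7} the loop above finds globalstart = 2 and globalend = 9, allocates globals[]
   with end-start+2 = 9 entries, fills the first end-start+1 = 8 entries with -1, and then records
   the local position of each global index:

       global index g :  2  3  4  5  6  7  8  9
       globals[g-2]   :  1 -1  2 -1 -1  3 -1  0

   so ISGlobalToLocalMappingApply() can later turn a global index g with start <= g <= end into its
   local number by a single lookup globals[g-start].
*/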

406: /*@
407:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
408:     specified with a global numbering.

410:     Not collective

412:     Input Parameters:
413: +   mapping - mapping between local and global numbering
414: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
415:            IS_GTOLM_DROP - drops the indices with no local value from the output list
416: .   n - number of global indices to map
417: -   idx - global indices to map

419:     Output Parameters:
420: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
421: -   idxout - local index of each global index, one must pass in an array long enough 
422:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
423:              idxout == PETSC_NULL to determine the required length (returned in nout)
424:              and then allocate the required space and call ISGlobalToLocalMappingApply()
425:              a second time to set the values.

427:     Notes:
428:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

430:     This is not scalable in memory usage. Each processor requires an array of size
431:     O(Nglobal) to compute these mappings.

433:     Level: advanced

435:     Concepts: mapping^global to local

437: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
438:           ISLocalToGlobalMappingDestroy()
439: @*/
440: PetscErrorCode  ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
441:                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
442: {
443:   PetscInt i,*globals,nf = 0,tmp,start,end;

447:   if (!mapping->globals) {
448:     ISGlobalToLocalMappingSetUp_Private(mapping);
449:   }
450:   globals = mapping->globals;
451:   start   = mapping->globalstart;
452:   end     = mapping->globalend;

454:   if (type == IS_GTOLM_MASK) {
455:     if (idxout) {
456:       for (i=0; i<n; i++) {
457:         if (idx[i] < 0) idxout[i] = idx[i];
458:         else if (idx[i] < start) idxout[i] = -1;
459:         else if (idx[i] > end)   idxout[i] = -1;
460:         else                     idxout[i] = globals[idx[i] - start];
461:       }
462:     }
463:     if (nout) *nout = n;
464:   } else {
465:     if (idxout) {
466:       for (i=0; i<n; i++) {
467:         if (idx[i] < 0) continue;
468:         if (idx[i] < start) continue;
469:         if (idx[i] > end) continue;
470:         tmp = globals[idx[i] - start];
471:         if (tmp < 0) continue;
472:         idxout[nf++] = tmp;
473:       }
474:     } else {
475:       for (i=0; i<n; i++) {
476:         if (idx[i] < 0) continue;
477:         if (idx[i] < start) continue;
478:         if (idx[i] > end) continue;
479:         tmp = globals[idx[i] - start];
480:         if (tmp < 0) continue;
481:         nf++;
482:       }
483:     }
484:     if (nout) *nout = nf;
485:   }

487:   return(0);
488: }
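/*
   Usage sketch (editorial addition, not part of the original isltog.c): mapping global indices back
   to local ones in both modes. With the mapping {9,2,4,7} and the query {2,5,9}, IS_GTOLM_MASK
   returns {1,-1,0} with nout = 3, while IS_GTOLM_DROP returns {1,0} with nout = 2. Passing
   idxout == PETSC_NULL with IS_GTOLM_DROP just counts the mappable indices.

      ISLocalToGlobalMapping ltog;
      PetscInt               gidx[] = {9,2,4,7},query[] = {2,5,9};
      PetscInt               local[3],nout;
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,gidx,&ltog);CHKERRQ(ierr);
      ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_MASK,3,query,&nout,local);CHKERRQ(ierr);
         local = {1,-1,0}, nout = 3
      ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,query,&nout,PETSC_NULL);CHKERRQ(ierr);
         nout = 2; now allocate an array of that length and call again with idxout != PETSC_NULL
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/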

492: /*@C
493:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
494:      each index shared by more than one processor 

496:     Collective on ISLocalToGlobalMapping

498:     Input Parameter:
499: .   mapping - the mapping from local to global indexing

501:     Output Parameters:
502: +   nproc - number of processors that are connected to this one
503: .   procs - neighboring processors
504: .   numprocs - number of indices for each subdomain (processor)
505: -   indices - indices of local nodes shared with neighbors (sorted by global numbering)

507:     Level: advanced

509:     Concepts: mapping^local to global

511:     Fortran Usage: 
512: $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by 
513: $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
514:           PetscInt indices[nproc][numprocmax],ierr)
515:         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and 
516:         indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.


519: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
520:           ISLocalToGlobalMappingRestoreInfo()
521: @*/
522: PetscErrorCode  ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
523: {
525:   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
526:   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
527:   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
528:   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
529:   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
530:   PetscInt       first_procs,first_numprocs,*first_indices;
531:   MPI_Request    *recv_waits,*send_waits;
532:   MPI_Status     recv_status,*send_status,*recv_statuses;
533:   MPI_Comm       comm = mapping->comm;
534:   PetscTruth     debug = PETSC_FALSE;

537:   MPI_Comm_size(comm,&size);
538:   MPI_Comm_rank(comm,&rank);
539:   if (size == 1) {
540:     *nproc         = 0;
541:     *procs         = PETSC_NULL;
542:     PetscMalloc(sizeof(PetscInt),numprocs);
543:     (*numprocs)[0] = 0;
544:     PetscMalloc(sizeof(PetscInt*),indices);
545:     (*indices)[0]  = PETSC_NULL;
546:     return(0);
547:   }

549:   PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);

551:   /*
552:     Notes on ISLocalToGlobalMappingGetInfo

554:     globally owned node - a node that has been assigned to this processor in the global
555:            numbering, just for this routine.

557:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
558:            boundary (i.e. it has more than one local owner)

560:     locally owned node - node that exists on this processor's subdomain

562:     nontrivial locally owned node - node that is not in the interior (i.e. it belongs to more
563:            than one local subdomain)
564:   */
565:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
566:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
567:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

569:   for (i=0; i<n; i++) {
570:     if (lindices[i] > max) max = lindices[i];
571:   }
572:   MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
573:   Ng++;
574:   MPI_Comm_size(comm,&size);
575:   MPI_Comm_rank(comm,&rank);
576:   scale  = Ng/size + 1;
577:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
578:   rstart = scale*rank;

580:   /* determine ownership ranges of global indices */
581:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
582:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));

584:   /* determine owners of each local node  */
585:   PetscMalloc(n*sizeof(PetscInt),&owner);
586:   for (i=0; i<n; i++) {
587:     proc             = lindices[i]/scale; /* processor that globally owns this index */
588:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
589:     owner[i]         = proc;
590:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
591:   }
592:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
593:   PetscInfo1(0,"Number of global owners for my local data %d\n",nsends);

595:   /* inform other processors of number of messages and max length*/
596:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
597:   PetscInfo1(0,"Number of local owners for my global data %d\n",nrecvs);

599:   /* post receives for owned rows */
600:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);
601:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
602:   for (i=0; i<nrecvs; i++) {
603:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
604:   }

606:   /* pack messages containing lists of local nodes to owners */
607:   PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);
608:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
609:   starts[0]  = 0;
610:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
611:   for (i=0; i<n; i++) {
612:     sends[starts[owner[i]]++] = lindices[i];
613:     sends[starts[owner[i]]++] = i;
614:   }
615:   PetscFree(owner);
616:   starts[0]  = 0;
617:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

619:   /* send the messages */
620:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
621:   PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);
622:   cnt = 0;
623:   for (i=0; i<size; i++) {
624:     if (nprocs[2*i]) {
625:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
626:       dest[cnt] = i;
627:       cnt++;
628:     }
629:   }
630:   PetscFree(starts);

632:   /* wait on receives */
633:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);
634:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);
635:   cnt  = nrecvs;
636:   PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);
637:   PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
638:   while (cnt) {
639:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
640:     /* unpack receives into our local space */
641:     MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
642:     source[imdex]  = recv_status.MPI_SOURCE;
643:     len[imdex]     = len[imdex]/2;
644:     /* count how many local owners for each of my global owned indices */
645:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
646:     cnt--;
647:   }
648:   PetscFree(recv_waits);

650:   /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
651:   nowned  = 0;
652:   nownedm = 0;
653:   for (i=0; i<ng; i++) {
654:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
655:   }

657:   /* create single array to contain rank of all local owners of each globally owned index */
658:   PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);
659:   PetscMalloc((ng+1)*sizeof(PetscInt),&starts);
660:   starts[0] = 0;
661:   for (i=1; i<ng; i++) {
662:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
663:     else starts[i] = starts[i-1];
664:   }

666:   /* for each nontrivial globally owned node, list all arriving processors */
667:   for (i=0; i<nrecvs; i++) {
668:     for (j=0; j<len[i]; j++) {
669:       node = recvs[2*i*nmax+2*j]-rstart;
670:       if (nownedsenders[node] > 1) {
671:         ownedsenders[starts[node]++] = source[i];
672:       }
673:     }
674:   }

676:   if (debug) { /* -----------------------------------  */
677:     starts[0]    = 0;
678:     for (i=1; i<ng; i++) {
679:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
680:       else starts[i] = starts[i-1];
681:     }
682:     for (i=0; i<ng; i++) {
683:       if (nownedsenders[i] > 1) {
684:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
685:         for (j=0; j<nownedsenders[i]; j++) {
686:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
687:         }
688:         PetscSynchronizedPrintf(comm,"\n");
689:       }
690:     }
691:     PetscSynchronizedFlush(comm);
692:   }/* -----------------------------------  */

694:   /* wait on original sends */
695:   if (nsends) {
696:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
697:     MPI_Waitall(nsends,send_waits,send_status);
698:     PetscFree(send_status);
699:   }
700:   PetscFree(send_waits);
701:   PetscFree(sends);
702:   PetscFree(nprocs);

704:   /* pack messages to send back to local owners */
705:   starts[0]    = 0;
706:   for (i=1; i<ng; i++) {
707:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
708:     else starts[i] = starts[i-1];
709:   }
710:   nsends2 = nrecvs;
711:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs); /* length of each message */
712:   for (i=0; i<nrecvs; i++) {
713:     nprocs[i] = 1;
714:     for (j=0; j<len[i]; j++) {
715:       node = recvs[2*i*nmax+2*j]-rstart;
716:       if (nownedsenders[node] > 1) {
717:         nprocs[i] += 2 + nownedsenders[node];
718:       }
719:     }
720:   }
721:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
722:   PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);
723:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);
724:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
725:   /*
726:      Each message is nprocs[i] entries long, and consists of
727:        (0) the number of nontrivial nodes being sent back, followed, for each such node, by
728:        (1) the local node number,
729:        (2) the number of processors sharing it,
730:        (3) the ranks of the processors sharing it
731:   */
732:   for (i=0; i<nsends2; i++) {
733:     cnt = 1;
734:     sends2[starts2[i]] = 0;
735:     for (j=0; j<len[i]; j++) {
736:       node = recvs[2*i*nmax+2*j]-rstart;
737:       if (nownedsenders[node] > 1) {
738:         sends2[starts2[i]]++;
739:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
740:         sends2[starts2[i]+cnt++] = nownedsenders[node];
741:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
742:         cnt += nownedsenders[node];
743:       }
744:     }
745:   }

747:   /* receive the message lengths */
748:   nrecvs2 = nsends;
749:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);
750:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);
751:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
752:   for (i=0; i<nrecvs2; i++) {
753:     MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
754:   }

756:   /* send the message lengths */
757:   for (i=0; i<nsends2; i++) {
758:     MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
759:   }

761:   /* wait on receives of lens */
762:   if (nrecvs2) {
763:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
764:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
765:     PetscFree(recv_statuses);
766:   }
767:   PetscFree(recv_waits);

769:   starts3[0] = 0;
770:   nt         = 0;
771:   for (i=0; i<nrecvs2-1; i++) {
772:     starts3[i+1] = starts3[i] + lens2[i];
773:     nt          += lens2[i];
774:   }
775:   nt += lens2[nrecvs2-1];

777:   PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);
778:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
779:   for (i=0; i<nrecvs2; i++) {
780:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
781:   }
782: 
783:   /* send the messages */
784:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
785:   for (i=0; i<nsends2; i++) {
786:     MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
787:   }

789:   /* wait on receives */
790:   if (nrecvs2) {
791:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
792:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
793:     PetscFree(recv_statuses);
794:   }
795:   PetscFree(recv_waits);
796:   PetscFree(nprocs);

798:   if (debug) { /* -----------------------------------  */
799:     cnt = 0;
800:     for (i=0; i<nrecvs2; i++) {
801:       nt = recvs2[cnt++];
802:       for (j=0; j<nt; j++) {
803:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
804:         for (k=0; k<recvs2[cnt+1]; k++) {
805:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
806:         }
807:         cnt += 2 + recvs2[cnt+1];
808:         PetscSynchronizedPrintf(comm,"\n");
809:       }
810:     }
811:     PetscSynchronizedFlush(comm);
812:   } /* -----------------------------------  */

814:   /* count the number of subdomains for each local node */
815:   PetscMalloc(size*sizeof(PetscInt),&nprocs);
816:   PetscMemzero(nprocs,size*sizeof(PetscInt));
817:   cnt  = 0;
818:   for (i=0; i<nrecvs2; i++) {
819:     nt = recvs2[cnt++];
820:     for (j=0; j<nt; j++) {
821:       for (k=0; k<recvs2[cnt+1]; k++) {
822:         nprocs[recvs2[cnt+2+k]]++;
823:       }
824:       cnt += 2 + recvs2[cnt+1];
825:     }
826:   }
827:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
828:   *nproc    = nt;
829:   PetscMalloc((nt+1)*sizeof(PetscInt),procs);
830:   PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);
831:   PetscMalloc((nt+1)*sizeof(PetscInt*),indices);
832:   PetscMalloc(size*sizeof(PetscInt),&bprocs);
833:   cnt       = 0;
834:   for (i=0; i<size; i++) {
835:     if (nprocs[i] > 0) {
836:       bprocs[i]        = cnt;
837:       (*procs)[cnt]    = i;
838:       (*numprocs)[cnt] = nprocs[i];
839:       PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);
840:       cnt++;
841:     }
842:   }

844:   /* make the list of subdomains for each nontrivial local node */
845:   PetscMemzero(*numprocs,nt*sizeof(PetscInt));
846:   cnt  = 0;
847:   for (i=0; i<nrecvs2; i++) {
848:     nt = recvs2[cnt++];
849:     for (j=0; j<nt; j++) {
850:       for (k=0; k<recvs2[cnt+1]; k++) {
851:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
852:       }
853:       cnt += 2 + recvs2[cnt+1];
854:     }
855:   }
856:   PetscFree(bprocs);
857:   PetscFree(recvs2);

859:   /* sort the node indices by their global numbers */
860:   nt = *nproc;
861:   for (i=0; i<nt; i++) {
862:     PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);
863:     for (j=0; j<(*numprocs)[i]; j++) {
864:       tmp[j] = lindices[(*indices)[i][j]];
865:     }
866:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
867:     PetscFree(tmp);
868:   }

870:   if (debug) { /* -----------------------------------  */
871:     nt = *nproc;
872:     for (i=0; i<nt; i++) {
873:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
874:       for (j=0; j<(*numprocs)[i]; j++) {
875:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
876:       }
877:       PetscSynchronizedPrintf(comm,"\n");
878:     }
879:     PetscSynchronizedFlush(comm);
880:   } /* -----------------------------------  */

882:   /* wait on sends */
883:   if (nsends2) {
884:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
885:     MPI_Waitall(nsends2,send_waits,send_status);
886:     PetscFree(send_status);
887:   }

889:   PetscFree(starts3);
890:   PetscFree(dest);
891:   PetscFree(send_waits);

893:   PetscFree(nownedsenders);
894:   PetscFree(ownedsenders);
895:   PetscFree(starts);
896:   PetscFree(starts2);
897:   PetscFree(lens2);

899:   PetscFree(source);
900:   PetscFree(len);
901:   PetscFree(recvs);
902:   PetscFree(nprocs);
903:   PetscFree(sends2);

905:   /* put the information about myself as the first entry in the list */
906:   first_procs    = (*procs)[0];
907:   first_numprocs = (*numprocs)[0];
908:   first_indices  = (*indices)[0];
909:   for (i=0; i<*nproc; i++) {
910:     if ((*procs)[i] == rank) {
911:       (*procs)[0]    = (*procs)[i];
912:       (*numprocs)[0] = (*numprocs)[i];
913:       (*indices)[0]  = (*indices)[i];
914:       (*procs)[i]    = first_procs;
915:       (*numprocs)[i] = first_numprocs;
916:       (*indices)[i]  = first_indices;
917:       break;
918:     }
919:   }
920:   return(0);
921: }

925: /*@C
926:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

928:     Collective on ISLocalToGlobalMapping

930:    Input Parameter:
931: .   mapping - the mapping from local to global indexing

933:     Output Parameters:
934: +   nproc - number of processors that are connected to this one
935: .   procs - neighboring processors
936: .   numprocs - number of indices for each processor
937: -   indices - indices of local nodes shared with neighbors (sorted by global numbering)

939:     Level: advanced

941: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
942:           ISLocalToGlobalMappingGetInfo()
943: @*/
944: PetscErrorCode  ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
945: {
947:   PetscInt i;

950:   PetscFree(*procs);
951:   PetscFree(*numprocs);
952:   if (*indices) {
953:     PetscFree((*indices)[0]);
954:     for (i=1; i<*nproc; i++) {
955:       PetscFree((*indices)[i]);
956:     }
957:     PetscFree(*indices);
958:   }
959:   return(0);
960: }
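/*
   Usage sketch (editorial addition, not part of the original isltog.c): querying, printing, and
   releasing the neighbor information. The arrays are allocated by ISLocalToGlobalMappingGetInfo()
   and must be returned with ISLocalToGlobalMappingRestoreInfo(). Names are illustrative.

      ISLocalToGlobalMapping ltog;                          already created on a parallel communicator
      PetscInt               nproc,*procs,*numprocs,**indices,i,j;
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
      for (i=0; i<nproc; i++) {
        for (j=0; j<numprocs[i]; j++) {
          ierr = PetscPrintf(PETSC_COMM_SELF,"share local node %d with process %d\n",indices[i][j],procs[i]);CHKERRQ(ierr);
        }
      }
      ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
*/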