Actual source code: isltog.c

  1: #define PETSCVEC_DLL

 3:  #include "petscsys.h"
 4:  #include "src/vec/is/isimpl.h"

  6: EXTERN PetscErrorCode PETSCVEC_DLLEXPORT VecInitializePackage(char *);
  7: PetscCookie PETSCVEC_DLLEXPORT IS_LTOGM_COOKIE = -1;

 11: /*@C
 12:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 14:     Not Collective

 16:     Input Parameter:
 17: .   ltog - local to global mapping

 19:     Output Parameter:
 20: .   n - the number of entries in the local mapping

 22:     Level: advanced

 24:     Concepts: mapping^local to global

 26: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 27: @*/
 28: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
 29: {
 33:   *n = mapping->n;
 34:   return(0);
 35: }

 39: /*@C
 40:     ISLocalToGlobalMappingView - Views a local to global mapping

 42:     Not Collective

 44:     Input Parameters:
 45: +   ltog - local to global mapping
 46: -   viewer - viewer

 48:     Level: advanced

 50:     Concepts: mapping^local to global

 52: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 53: @*/
 54: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 55: {
 56:   PetscInt        i;
 57:   PetscMPIInt     rank;
 58:   PetscTruth      iascii;
 59:   PetscErrorCode  ierr;

 63:   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);

 66:   MPI_Comm_rank(mapping->comm,&rank);
 67:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
 68:   if (iascii) {
 69:     for (i=0; i<mapping->n; i++) {
 70:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 71:     }
 72:     PetscViewerFlush(viewer);
 73:   } else {
 74:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 75:   }

 77:   return(0);
 78: }
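
An editor's usage sketch (not part of isltog.c): passing a null viewer falls back to PETSC_VIEWER_STDOUT_ on the mapping's communicator, as the code above shows. The function name PrintMapping is invented for illustration.

/* Sketch: print every local index and its global number, one line per entry. */
PetscErrorCode PrintMapping(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;

  ierr = ISLocalToGlobalMappingView(mapping,PETSC_NULL);CHKERRQ(ierr);
  return 0;
}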

 82: /*@C
 83:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 84:     ordering and a global parallel ordering.

 86:     Not collective

 88:     Input Parameter:
 89: .   is - index set containing the global numbers for each local index

 91:     Output Parameter:
 92: .   mapping - new mapping data structure

 94:     Level: advanced

 96:     Concepts: mapping^local to global

 98: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 99: @*/
100: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
101: {
103:   PetscInt      n,*indices;
104:   MPI_Comm comm;


110:   PetscObjectGetComm((PetscObject)is,&comm);
111:   ISGetLocalSize(is,&n);
112:   ISGetIndices(is,&indices);
113:   ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
114:   ISRestoreIndices(is,&indices);

116:   return(0);
117: }
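
An editor's usage sketch (not part of isltog.c): build a mapping from an index set that lists each local point's global number. The helper name and the arguments nlocal/gidx are hypothetical; error handling follows the usual ierr/CHKERRQ pattern.

/* Sketch: create a local-to-global mapping from an IS of global numbers.
   Assumes "petscis.h" is included and PetscInitialize() has been called. */
PetscErrorCode CreateMappingFromIS(MPI_Comm comm,PetscInt nlocal,PetscInt gidx[],ISLocalToGlobalMapping *ltog)
{
  PetscErrorCode ierr;
  IS             is;

  ierr = ISCreateGeneral(comm,nlocal,gidx,&is);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingCreateIS(is,ltog);CHKERRQ(ierr);
  ierr = ISDestroy(is);CHKERRQ(ierr);   /* the mapping copied the indices, so the IS may go */
  return 0;
}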


122: /*@C
123:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
124:     ordering and a global parallel ordering.

126:     Not Collective, but communicator may have more than one process

128:     Input Parameters:
129: +   comm - MPI communicator
130: .   n - the number of local elements
131: -   indices - the global index for each local element

133:     Output Parameter:
134: .   mapping - new mapping data structure

136:     Level: advanced

138:     Concepts: mapping^local to global

140: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
141: @*/
142: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
143: {
145:   PetscInt       *in;

150:   PetscMalloc(n*sizeof(PetscInt),&in);
151:   PetscMemcpy(in,indices,n*sizeof(PetscInt));
152:   ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);
153:   return(0);
154: }
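
An editor's usage sketch (not part of isltog.c): call ISLocalToGlobalMappingCreate() directly on a plain index array. The index values are invented; because the routine copies them (see the PetscMalloc/PetscMemcpy above), the array may live on the stack.

/* Sketch: give each of 4 local points an arbitrary global number. */
PetscErrorCode CreateExampleMapping(MPI_Comm comm,ISLocalToGlobalMapping *ltog)
{
  PetscErrorCode ierr;
  PetscInt       gidx[4] = {10,11,25,3};   /* hypothetical global numbers */

  ierr = ISLocalToGlobalMappingCreate(comm,4,gidx,ltog);CHKERRQ(ierr);
  return 0;
}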

158: /*@C
159:     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
160:     ordering and a global parallel ordering.

162:     Not Collective, but communicator may have more than one process

164:     Input Parameters:
165: +   comm - MPI communicator
166: .   n - the number of local elements
167: -   indices - the global index for each local element

169:     Output Parameter:
170: .   mapping - new mapping data structure

172:     Level: developer

 174:     Notes: Does not copy the indices; it only keeps the pointer to them. ISLocalToGlobalMappingDestroy()
 175:     will free the space, so the indices must be obtained with PetscMalloc() and must not be freed elsewhere.

177:     Concepts: mapping^local to global

179: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
180: @*/
181: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
182: {

188:   *mapping = PETSC_NULL;
189: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
190:   VecInitializePackage(PETSC_NULL);
191: #endif
192:   if (IS_LTOGM_COOKIE == -1) {
193:     PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");
194:   }

196:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
197:                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
198:   PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(PetscInt));

200:   (*mapping)->n       = n;
201:   (*mapping)->indices = (PetscInt*)indices;

203:   /*
 204:       Do not create the global to local mapping. It is only created if
 205:      ISGlobalToLocalMappingApply() is called
206:   */
207:   (*mapping)->globals = 0;
208:   return(0);
209: }
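
An editor's usage sketch (not part of isltog.c) of the "no copy" calling pattern the Notes describe: the index array must come from PetscMalloc() and must not be freed by the caller, since ISLocalToGlobalMappingDestroy() frees it. The global numbering used here is invented.

/* Sketch: hand a PetscMalloc()ed index array over to the mapping. */
PetscErrorCode CreateMappingNoCopy(MPI_Comm comm,PetscInt n,ISLocalToGlobalMapping *ltog)
{
  PetscErrorCode ierr;
  PetscInt       *gidx,i;

  ierr = PetscMalloc(n*sizeof(PetscInt),&gidx);CHKERRQ(ierr);
  for (i=0; i<n; i++) gidx[i] = 2*i;            /* hypothetical global numbers */
  ierr = ISLocalToGlobalMappingCreateNC(comm,n,gidx,ltog);CHKERRQ(ierr);
  /* no PetscFree(gidx): the mapping now owns the array */
  return 0;
}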

213: /*@C
214:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
215:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
216:        and VecSetLocalToGlobalMappingBlock().

218:     Not Collective, but communicator may have more than one process

220:     Input Parameters:
221: +    inmap - original point-wise mapping
222: -    bs - block size

224:     Output Parameter:
225: .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.

227:     Level: advanced

229:     Concepts: mapping^local to global

231: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
232: @*/
233: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
234: {
236:   PetscInt       *ii,i,n;

239:   if (bs > 1) {
240:     n    = inmap->n/bs;
241:     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
242:     PetscMalloc(n*sizeof(PetscInt),&ii);
243:     for (i=0; i<n; i++) {
244:       ii[i] = inmap->indices[bs*i]/bs;
245:     }
246:     ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);
247:     PetscFree(ii);
248:   } else {
249:     *outmap = inmap;
250:     PetscObjectReference((PetscObject)inmap);
251:   }
252:   return(0);
253: }
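
An editor's usage sketch (not part of isltog.c): derive a block mapping from a point-wise one, e.g. for several degrees of freedom per node. The point-wise mapping's length must be divisible by bs and its indices are assumed to be laid out block by block.

/* Sketch: build the block mapping; with bs == 1 the input mapping is simply
   referenced again, so the result should still be destroyed by the caller. */
PetscErrorCode MakeBlockMapping(ISLocalToGlobalMapping ltog,PetscInt bs,ISLocalToGlobalMapping *bltog)
{
  PetscErrorCode ierr;

  ierr = ISLocalToGlobalMappingBlock(ltog,bs,bltog);CHKERRQ(ierr);
  return 0;
}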
254: 
257: /*@
258:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
259:    ordering and a global parallel ordering.

 261:    Not Collective

 263:    Input Parameter:
264: .  mapping - mapping data structure

266:    Level: advanced

268: .seealso: ISLocalToGlobalMappingCreate()
269: @*/
270: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
271: {
275:   if (--mapping->refct > 0) return(0);
276:   if (mapping->refct < 0) {
277:     SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
278:   }

280:   PetscFree(mapping->indices);
281:   if (mapping->globals) {PetscFree(mapping->globals);}
282:   PetscHeaderDestroy(mapping);
283:   return(0);
284: }
285: 
288: /*@
289:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
290:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
291:     context.

293:     Not collective

295:     Input Parameters:
296: +   mapping - mapping between local and global numbering
297: -   is - index set in local numbering

 299:    Output Parameter:
300: .   newis - index set in global numbering

302:     Level: advanced

304:     Concepts: mapping^local to global

306: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
307:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
308: @*/
309: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
310: {
312:   PetscInt            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;


319:   ISGetLocalSize(is,&n);
320:   ISGetIndices(is,&idxin);
321:   idxmap = mapping->indices;
322: 
323:   PetscMalloc(n*sizeof(PetscInt),&idxout);
324:   for (i=0; i<n; i++) {
325:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
326:     idxout[i] = idxmap[idxin[i]];
327:   }
328:   ISRestoreIndices(is,&idxin);
329:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);
330:   PetscFree(idxout);
331:   return(0);
332: }
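
An editor's usage sketch (not part of isltog.c): translate a small index set given in local numbering into global numbering. The local index values {0,2} are invented for illustration.

/* Sketch: map the local indices {0,2} of this process to their global numbers. */
PetscErrorCode MapTwoLocalIndices(ISLocalToGlobalMapping ltog,IS *globalis)
{
  PetscErrorCode ierr;
  PetscInt       lidx[2] = {0,2};
  IS             localis;

  ierr = ISCreateGeneral(PETSC_COMM_SELF,2,lidx,&localis);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApplyIS(ltog,localis,globalis);CHKERRQ(ierr);
  ierr = ISDestroy(localis);CHKERRQ(ierr);
  return 0;
}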

334: /*MC
335:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
336:    and converts them to the global numbering.

338:    Not collective

340:    Input Parameters:
341: +  mapping - the local to global mapping context
342: .  N - number of integers
343: -  in - input indices in local numbering

345:    Output Parameter:
346: .  out - indices in global numbering

348:    Synopsis:
349:    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

351:    Notes: 
352:    The in and out array parameters may be identical.

354:    Level: advanced

356: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
357:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
358:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

360:     Concepts: mapping^local to global

362: M*/
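
An editor's usage sketch (not part of isltog.c) of the array form; as noted above, the input and output arrays may be identical, so the translation can be done in place. The array idx[] is hypothetical.

/* Sketch: overwrite an array of local indices with the corresponding global ones. */
PetscErrorCode LocalToGlobalInPlace(ISLocalToGlobalMapping ltog,PetscInt nidx,PetscInt idx[])
{
  PetscErrorCode ierr;

  ierr = ISLocalToGlobalMappingApply(ltog,nidx,idx,idx);CHKERRQ(ierr);
  return 0;
}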

364: /* -----------------------------------------------------------------------------------------*/

368: /*
369:     Creates the global fields in the ISLocalToGlobalMapping structure
370: */
371: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
372: {
374:   PetscInt            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

377:   end   = 0;
378:   start = 100000000;

380:   for (i=0; i<n; i++) {
381:     if (idx[i] < 0) continue;
382:     if (idx[i] < start) start = idx[i];
383:     if (idx[i] > end)   end   = idx[i];
384:   }
385:   if (start > end) {start = 0; end = -1;}
386:   mapping->globalstart = start;
387:   mapping->globalend   = end;

389:   PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);
390:   mapping->globals = globals;
391:   for (i=0; i<end-start+1; i++) {
392:     globals[i] = -1;
393:   }
394:   for (i=0; i<n; i++) {
395:     if (idx[i] < 0) continue;
396:     globals[idx[i] - start] = i;
397:   }

399:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
400:   return(0);
401: }

405: /*@
406:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
407:     specified with a global numbering.

409:     Not collective

411:     Input Parameters:
412: +   mapping - mapping between local and global numbering
413: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
414:            IS_GTOLM_DROP - drops the indices with no local value from the output list
415: .   n - number of global indices to map
416: -   idx - global indices to map

418:     Output Parameters:
419: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
 420: -   idxout - local index of each global index; one must pass in an array long enough 
421:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
422:              idxout == PETSC_NULL to determine the required length (returned in nout)
423:              and then allocate the required space and call ISGlobalToLocalMappingApply()
424:              a second time to set the values.

426:     Notes:
427:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

 429:     This is not scalable in memory usage. Each processor requires an array of size O(Nglobal)
 430:     to compute these.

432:     Level: advanced

434:     Concepts: mapping^global to local

436: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
437:           ISLocalToGlobalMappingDestroy()
438: @*/
439: PetscErrorCode PETSCVEC_DLLEXPORT ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
440:                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
441: {
442:   PetscInt i,*globals,nf = 0,tmp,start,end;

446:   if (!mapping->globals) {
447:     ISGlobalToLocalMappingSetUp_Private(mapping);
448:   }
449:   globals = mapping->globals;
450:   start   = mapping->globalstart;
451:   end     = mapping->globalend;

453:   if (type == IS_GTOLM_MASK) {
454:     if (idxout) {
455:       for (i=0; i<n; i++) {
456:         if (idx[i] < 0) idxout[i] = idx[i];
457:         else if (idx[i] < start) idxout[i] = -1;
458:         else if (idx[i] > end)   idxout[i] = -1;
459:         else                     idxout[i] = globals[idx[i] - start];
460:       }
461:     }
462:     if (nout) *nout = n;
463:   } else {
464:     if (idxout) {
465:       for (i=0; i<n; i++) {
466:         if (idx[i] < 0) continue;
467:         if (idx[i] < start) continue;
468:         if (idx[i] > end) continue;
469:         tmp = globals[idx[i] - start];
470:         if (tmp < 0) continue;
471:         idxout[nf++] = tmp;
472:       }
473:     } else {
474:       for (i=0; i<n; i++) {
475:         if (idx[i] < 0) continue;
476:         if (idx[i] < start) continue;
477:         if (idx[i] > end) continue;
478:         tmp = globals[idx[i] - start];
479:         if (tmp < 0) continue;
480:         nf++;
481:       }
482:     }
483:     if (nout) *nout = nf;
484:   }

486:   return(0);
487: }
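
An editor's usage sketch (not part of isltog.c) of the two-pass pattern the docstring describes for IS_GTOLM_DROP: first call with idxout == PETSC_NULL to get the count, then allocate and call again. The array gidx[] is hypothetical.

/* Sketch: keep only those global indices in gidx[] that have a local number here. */
PetscErrorCode GlobalToLocalDrop(ISLocalToGlobalMapping ltog,PetscInt n,const PetscInt gidx[],
                                 PetscInt *nlocal,PetscInt **lidx)
{
  PetscErrorCode ierr;

  /* first pass: idxout == PETSC_NULL, only the count comes back in nlocal */
  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,n,gidx,nlocal,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscMalloc((*nlocal+1)*sizeof(PetscInt),lidx);CHKERRQ(ierr);
  /* second pass: fill the freshly allocated array */
  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,n,gidx,nlocal,*lidx);CHKERRQ(ierr);
  return 0;
}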

491: /*@C
492:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
493:      each index shared by more than one processor 

495:     Collective on ISLocalToGlobalMapping

497:     Input Parameter:
498: .   mapping - the mapping from local to global indexing

500:     Output Parameters:
501: +   nproc - number of processors that are connected to this one
502: .   procs - neighboring processors
503: .   numprocs - number of indices for each subdomain (processor)
504: -   indices - indices of local nodes shared with neighbor (sorted by global numbering)

506:     Level: advanced

508:     Concepts: mapping^local to global

510: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
511:           ISLocalToGlobalMappingRestoreInfo()
512: @*/
513: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
514: {
516:   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
517:   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
518:   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
519:   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
520:   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
521:   PetscInt       first_procs,first_numprocs,*first_indices;
522:   MPI_Request    *recv_waits,*send_waits;
523:   MPI_Status     recv_status,*send_status,*recv_statuses;
524:   MPI_Comm       comm = mapping->comm;
525:   PetscTruth     debug = PETSC_FALSE;

528:   MPI_Comm_size(comm,&size);
529:   MPI_Comm_rank(comm,&rank);
530:   if (size == 1) {
531:     *nproc         = 0;
532:     *procs         = PETSC_NULL;
533:     PetscMalloc(sizeof(PetscInt),numprocs);
534:     (*numprocs)[0] = 0;
535:     PetscMalloc(sizeof(PetscInt*),indices);
536:     (*indices)[0]  = PETSC_NULL;
537:     return(0);
538:   }

540:   PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);

542:   /*
543:     Notes on ISLocalToGlobalMappingGetInfo

 545:     globally owned node - a node that has been assigned to this processor in the global
 546:            numbering, just for this routine.

 548:     nontrivial globally owned node - a node assigned to this processor that is on a subdomain
 549:            boundary (i.e. it has more than one local owner)

 551:     locally owned node - a node that exists on this processor's subdomain

 553:     nontrivial locally owned node - a node that is not in the interior (i.e. it belongs to
 554:            more than one local subdomain)
555:   */
556:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
557:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
558:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

560:   for (i=0; i<n; i++) {
561:     if (lindices[i] > max) max = lindices[i];
562:   }
563:   MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
564:   Ng++;
565:   MPI_Comm_size(comm,&size);
566:   MPI_Comm_rank(comm,&rank);
567:   scale  = Ng/size + 1;
568:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
569:   rstart = scale*rank;

571:   /* determine ownership ranges of global indices */
572:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
573:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));

575:   /* determine owners of each local node  */
576:   PetscMalloc(n*sizeof(PetscInt),&owner);
577:   for (i=0; i<n; i++) {
578:     proc             = lindices[i]/scale; /* processor that globally owns this index */
579:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
580:     owner[i]         = proc;
581:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
582:   }
583:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
584:   PetscLogInfo((0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends));

586:   /* inform other processors of number of messages and max length*/
587:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
588:   PetscLogInfo((0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs));

590:   /* post receives for owned rows */
591:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);
592:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
593:   for (i=0; i<nrecvs; i++) {
594:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
595:   }

597:   /* pack messages containing lists of local nodes to owners */
598:   PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);
599:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
600:   starts[0]  = 0;
601:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
602:   for (i=0; i<n; i++) {
603:     sends[starts[owner[i]]++] = lindices[i];
604:     sends[starts[owner[i]]++] = i;
605:   }
606:   PetscFree(owner);
607:   starts[0]  = 0;
608:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

610:   /* send the messages */
611:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
612:   PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);
613:   cnt = 0;
614:   for (i=0; i<size; i++) {
615:     if (nprocs[2*i]) {
616:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
617:       dest[cnt] = i;
618:       cnt++;
619:     }
620:   }
621:   PetscFree(starts);

623:   /* wait on receives */
624:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);
625:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);
626:   cnt  = nrecvs;
627:   PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);
628:   PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
629:   while (cnt) {
630:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
631:     /* unpack receives into our local space */
632:     MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
633:     source[imdex]  = recv_status.MPI_SOURCE;
634:     len[imdex]     = len[imdex]/2;
635:     /* count how many local owners for each of my global owned indices */
636:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
637:     cnt--;
638:   }
639:   PetscFree(recv_waits);

 641:   /* count how many globally owned indices lie on a subdomain boundary, and their total weighted by how many processors own each */
642:   nowned  = 0;
643:   nownedm = 0;
644:   for (i=0; i<ng; i++) {
645:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
646:   }

648:   /* create single array to contain rank of all local owners of each globally owned index */
649:   PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);
650:   PetscMalloc((ng+1)*sizeof(PetscInt),&starts);
651:   starts[0] = 0;
652:   for (i=1; i<ng; i++) {
653:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
654:     else starts[i] = starts[i-1];
655:   }

 657:   /* for each nontrivial globally owned node list all arriving processors */
658:   for (i=0; i<nrecvs; i++) {
659:     for (j=0; j<len[i]; j++) {
660:       node = recvs[2*i*nmax+2*j]-rstart;
661:       if (nownedsenders[node] > 1) {
662:         ownedsenders[starts[node]++] = source[i];
663:       }
664:     }
665:   }

667:   if (debug) { /* -----------------------------------  */
668:     starts[0]    = 0;
669:     for (i=1; i<ng; i++) {
670:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
671:       else starts[i] = starts[i-1];
672:     }
673:     for (i=0; i<ng; i++) {
674:       if (nownedsenders[i] > 1) {
675:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
676:         for (j=0; j<nownedsenders[i]; j++) {
677:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
678:         }
679:         PetscSynchronizedPrintf(comm,"\n");
680:       }
681:     }
682:     PetscSynchronizedFlush(comm);
683:   }/* -----------------------------------  */

685:   /* wait on original sends */
686:   if (nsends) {
687:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
688:     MPI_Waitall(nsends,send_waits,send_status);
689:     PetscFree(send_status);
690:   }
691:   PetscFree(send_waits);
692:   PetscFree(sends);
693:   PetscFree(nprocs);

695:   /* pack messages to send back to local owners */
696:   starts[0]    = 0;
697:   for (i=1; i<ng; i++) {
698:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
699:     else starts[i] = starts[i-1];
700:   }
701:   nsends2 = nrecvs;
702:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs); /* length of each message */
703:   for (i=0; i<nrecvs; i++) {
704:     nprocs[i] = 1;
705:     for (j=0; j<len[i]; j++) {
706:       node = recvs[2*i*nmax+2*j]-rstart;
707:       if (nownedsenders[node] > 1) {
708:         nprocs[i] += 2 + nownedsenders[node];
709:       }
710:     }
711:   }
712:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
713:   PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);
714:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);
715:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
716:   /*
 717:      Each message is nprocs[i] entries long and consists of
718:        (0) the number of nodes being sent back 
719:        (1) the local node number,
720:        (2) the number of processors sharing it,
721:        (3) the processors sharing it
722:   */
723:   for (i=0; i<nsends2; i++) {
724:     cnt = 1;
725:     sends2[starts2[i]] = 0;
726:     for (j=0; j<len[i]; j++) {
727:       node = recvs[2*i*nmax+2*j]-rstart;
728:       if (nownedsenders[node] > 1) {
729:         sends2[starts2[i]]++;
730:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
731:         sends2[starts2[i]+cnt++] = nownedsenders[node];
732:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
733:         cnt += nownedsenders[node];
734:       }
735:     }
736:   }

738:   /* receive the message lengths */
739:   nrecvs2 = nsends;
740:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);
741:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);
742:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
743:   for (i=0; i<nrecvs2; i++) {
744:     MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
745:   }

747:   /* send the message lengths */
748:   for (i=0; i<nsends2; i++) {
749:     MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
750:   }

752:   /* wait on receives of lens */
753:   if (nrecvs2) {
754:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
755:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
756:     PetscFree(recv_statuses);
757:   }
758:   PetscFree(recv_waits);

760:   starts3[0] = 0;
761:   nt         = 0;
762:   for (i=0; i<nrecvs2-1; i++) {
763:     starts3[i+1] = starts3[i] + lens2[i];
764:     nt          += lens2[i];
765:   }
766:   nt += lens2[nrecvs2-1];

768:   PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);
769:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
770:   for (i=0; i<nrecvs2; i++) {
771:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
772:   }
773: 
774:   /* send the messages */
775:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
776:   for (i=0; i<nsends2; i++) {
777:     MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
778:   }

780:   /* wait on receives */
781:   if (nrecvs2) {
782:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
783:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
784:     PetscFree(recv_statuses);
785:   }
786:   PetscFree(recv_waits);
787:   PetscFree(nprocs);

789:   if (debug) { /* -----------------------------------  */
790:     cnt = 0;
791:     for (i=0; i<nrecvs2; i++) {
792:       nt = recvs2[cnt++];
793:       for (j=0; j<nt; j++) {
794:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
795:         for (k=0; k<recvs2[cnt+1]; k++) {
796:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
797:         }
798:         cnt += 2 + recvs2[cnt+1];
799:         PetscSynchronizedPrintf(comm,"\n");
800:       }
801:     }
802:     PetscSynchronizedFlush(comm);
803:   } /* -----------------------------------  */

 805:   /* count how many nontrivial local nodes are shared with each subdomain (processor) */
806:   PetscMalloc(size*sizeof(PetscInt),&nprocs);
807:   PetscMemzero(nprocs,size*sizeof(PetscInt));
808:   cnt  = 0;
809:   for (i=0; i<nrecvs2; i++) {
810:     nt = recvs2[cnt++];
811:     for (j=0; j<nt; j++) {
812:       for (k=0; k<recvs2[cnt+1]; k++) {
813:         nprocs[recvs2[cnt+2+k]]++;
814:       }
815:       cnt += 2 + recvs2[cnt+1];
816:     }
817:   }
818:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
819:   *nproc    = nt;
820:   PetscMalloc((nt+1)*sizeof(PetscInt),procs);
821:   PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);
822:   PetscMalloc((nt+1)*sizeof(PetscInt*),indices);
823:   PetscMalloc(size*sizeof(PetscInt),&bprocs);
824:   cnt       = 0;
825:   for (i=0; i<size; i++) {
826:     if (nprocs[i] > 0) {
827:       bprocs[i]        = cnt;
828:       (*procs)[cnt]    = i;
829:       (*numprocs)[cnt] = nprocs[i];
830:       PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);
831:       cnt++;
832:     }
833:   }

835:   /* make the list of subdomains for each nontrivial local node */
836:   PetscMemzero(*numprocs,nt*sizeof(PetscInt));
837:   cnt  = 0;
838:   for (i=0; i<nrecvs2; i++) {
839:     nt = recvs2[cnt++];
840:     for (j=0; j<nt; j++) {
841:       for (k=0; k<recvs2[cnt+1]; k++) {
842:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
843:       }
844:       cnt += 2 + recvs2[cnt+1];
845:     }
846:   }
847:   PetscFree(bprocs);
848:   PetscFree(recvs2);

 850:   /* sort each subdomain's list of local node indices by their global numbers */
851:   nt = *nproc;
852:   for (i=0; i<nt; i++) {
853:     PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);
854:     for (j=0; j<(*numprocs)[i]; j++) {
855:       tmp[j] = lindices[(*indices)[i][j]];
856:     }
857:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
858:     PetscFree(tmp);
859:   }

861:   if (debug) { /* -----------------------------------  */
862:     nt = *nproc;
863:     for (i=0; i<nt; i++) {
864:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
865:       for (j=0; j<(*numprocs)[i]; j++) {
866:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
867:       }
868:       PetscSynchronizedPrintf(comm,"\n");
869:     }
870:     PetscSynchronizedFlush(comm);
871:   } /* -----------------------------------  */

873:   /* wait on sends */
874:   if (nsends2) {
875:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
876:     MPI_Waitall(nsends2,send_waits,send_status);
877:     PetscFree(send_status);
878:   }

880:   PetscFree(starts3);
881:   PetscFree(dest);
882:   PetscFree(send_waits);

884:   PetscFree(nownedsenders);
885:   PetscFree(ownedsenders);
886:   PetscFree(starts);
887:   PetscFree(starts2);
888:   PetscFree(lens2);

890:   PetscFree(source);
891:   PetscFree(len);
892:   PetscFree(recvs);
893:   PetscFree(nprocs);
894:   PetscFree(sends2);

896:   /* put the information about myself as the first entry in the list */
897:   first_procs    = (*procs)[0];
898:   first_numprocs = (*numprocs)[0];
899:   first_indices  = (*indices)[0];
900:   for (i=0; i<*nproc; i++) {
901:     if ((*procs)[i] == rank) {
902:       (*procs)[0]    = (*procs)[i];
903:       (*numprocs)[0] = (*numprocs)[i];
904:       (*indices)[0]  = (*indices)[i];
905:       (*procs)[i]    = first_procs;
906:       (*numprocs)[i] = first_numprocs;
907:       (*indices)[i]  = first_indices;
908:       break;
909:     }
910:   }
911:   return(0);
912: }
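
An editor's usage sketch (not part of isltog.c) of the Get/Restore pairing: the arrays returned by ISLocalToGlobalMappingGetInfo() must be handed back with ISLocalToGlobalMappingRestoreInfo(), documented below. Note that the first entry of the lists describes this process itself.

/* Sketch: report how many shared local points each connected subdomain has. */
PetscErrorCode ReportNeighbors(ISLocalToGlobalMapping ltog)
{
  PetscErrorCode ierr;
  PetscInt       nproc,*procs,*numprocs,**indices,i;

  ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
  for (i=0; i<nproc; i++) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"subdomain %d shares %d local points\n",procs[i],numprocs[i]);CHKERRQ(ierr);
  }
  ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
  return 0;
}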

916: /*@C
917:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

919:     Collective on ISLocalToGlobalMapping

921:    Input Parameter:
922: .   mapping - the mapping from local to global indexing

924:    Output Parameters:
925: +   nproc - number of processors that are connected to this one
926: .   procs - neighboring processors
927: .   numprocs - number of indices for each processor
928: -   indices - indices of local nodes shared with neighbor (sorted by global numbering)

930:     Level: advanced

932: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
933:           ISLocalToGlobalMappingGetInfo()
934: @*/
935: PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
936: {
938:   PetscInt i;

941:   if (*procs) {PetscFree(*procs);}
942:   if (*numprocs) {PetscFree(*numprocs);}
943:   if (*indices) {
944:     if ((*indices)[0]) {PetscFree((*indices)[0]);}
945:     for (i=1; i<*nproc; i++) {
946:       if ((*indices)[i]) {PetscFree((*indices)[i]);}
947:     }
948:     PetscFree(*indices);
949:   }
950:   return(0);
951: }