Actual source code: isltog.c
1: /*$Id: isltog.c,v 1.65 2001/05/21 14:16:29 bsmith Exp $*/
3: #include "petscsys.h"
4: #include "src/vec/is/isimpl.h"
6: EXTERN int VecInitializePackage(char *);
8: /*@C
9: ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
11: Not Collective
13: Input Parameter:
14: . ltog - local to global mapping
16: Output Parameter:
17: . n - the number of entries in the local mapping
19: Level: advanced
21: Concepts: mapping^local to global
23: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
24: @*/
25: int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
26: {
29: *n = mapping->n;
30: return(0);
31: }
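/*
    Illustrative usage sketch (not part of the library source): querying the number of
    local entries in a mapping ltog obtained from one of the creation routines below.
    Error checking is omitted.

        int                    nlocal;
        ISLocalToGlobalMapping ltog;

        ISLocalToGlobalMappingGetSize(ltog,&nlocal);
*/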
33: /*@C
34: ISLocalToGlobalMappingView - View a local to global mapping
36: Not Collective
38: Input Parameters:
39: + ltog - local to global mapping
40: - viewer - viewer
42: Level: advanced
44: Concepts: mapping^local to global
46: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
47: @*/
48: int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
49: {
50: int i,ierr,rank;
51: PetscTruth isascii;
55: if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
58: MPI_Comm_rank(mapping->comm,&rank);
59: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
60: if (isascii) {
61: for (i=0; i<mapping->n; i++) {
62:     PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
63: }
64: PetscViewerFlush(viewer);
65: } else {
66: SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
67: }
69: return(0);
70: }
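/*
    Illustrative sketch: printing an existing mapping ltog. Passing PETSC_NULL as the
    viewer falls back to PETSC_VIEWER_STDOUT_ on the mapping's communicator, as the
    code above shows. Error checking is omitted.

        ISLocalToGlobalMappingView(ltog,PETSC_NULL);
*/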
72: /*@C
73: ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
74: ordering and a global parallel ordering.
76: Not collective
78: Input Parameter:
79: . is - index set containing the global numbers for each local element
81: Output Parameter:
82: . mapping - new mapping data structure
84: Level: advanced
86: Concepts: mapping^local to global
88: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
89: @*/
90: int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
91: {
92: int n,*indices,ierr;
93: MPI_Comm comm;
98: PetscObjectGetComm((PetscObject)is,&comm);
99: ISGetLocalSize(is,&n);
100: ISGetIndices(is,&indices);
101: ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
102: ISRestoreIndices(is,&indices);
104: return(0);
105: }
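/*
    Illustrative sketch (assumed values): building a mapping from an index set whose
    entries are the global numbers of the local elements. The mapping keeps its own
    copy of the indices, so the index set may be destroyed afterwards. Error checking
    is omitted.

        IS                     is;
        int                    globals[] = {10,11,14,20};
        ISLocalToGlobalMapping ltog;

        ISCreateGeneral(PETSC_COMM_WORLD,4,globals,&is);
        ISLocalToGlobalMappingCreateIS(is,&ltog);
        ISDestroy(is);
*/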
108: /*@C
109: ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
110: ordering and a global parallel ordering.
112: Not Collective, but communicator may have more than one process
114: Input Parameters:
115: + comm - MPI communicator
116: . n - the number of local elements
117: - indices - the global index for each local element
119: Output Parameter:
120: . mapping - new mapping data structure
122: Level: advanced
124: Concepts: mapping^local to global
126: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
127: @*/
128: int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
129: {
130: int *in,ierr;
135: PetscMalloc((n+1)*sizeof(int),&in);
136: PetscMemcpy(in,indices,n*sizeof(int));
137: ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);
138: return(0);
139: }
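/*
    Illustrative sketch (assumed values): four local elements whose global numbers are
    0, 3, 9 and 12. ISLocalToGlobalMappingCreate() copies the array, so a stack array
    is fine. Error checking is omitted.

        int                    indices[] = {0,3,9,12};
        ISLocalToGlobalMapping ltog;

        ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,indices,&ltog);
        ISLocalToGlobalMappingDestroy(ltog);
*/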
141: /*@C
142: ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
143: ordering and a global parallel ordering.
145: Not Collective, but communicator may have more than one process
147: Input Parameters:
148: + comm - MPI communicator
149: . n - the number of local elements
150: - indices - the global index for each local element
152: Output Parameter:
153: . mapping - new mapping data structure
155: Level: developer
157:    Notes: Does not copy the indices; it only keeps the pointer to them. ISLocalToGlobalMappingDestroy()
158:    will free the space, so the array must be obtained with PetscMalloc() and must not be freed elsewhere.
160: Concepts: mapping^local to global
162: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
163: @*/
164: int ISLocalToGlobalMappingCreateNC(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
165: {
169: *mapping = PETSC_NULL;
170: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
171:   {int ierr = VecInitializePackage(PETSC_NULL); if (ierr) return ierr;}
172: #endif
174: PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
175: cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
176: PetscLogObjectCreate(*mapping);
177: PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));
179: (*mapping)->n = n;
180: (*mapping)->indices = (int*)indices;
182: /*
183: Do not create the global to local mapping. This is only created if
184:       ISGlobalToLocalMappingApply() is called
185: */
186: (*mapping)->globals = 0;
187: return(0);
188: }
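/*
    Illustrative sketch: because ISLocalToGlobalMappingCreateNC() keeps the pointer it
    is given and ISLocalToGlobalMappingDestroy() later frees it, the array must come
    from PetscMalloc() and must not be freed by the caller. Error checking is omitted.

        int                    *indices;
        ISLocalToGlobalMapping ltog;

        PetscMalloc(4*sizeof(int),&indices);
        indices[0] = 0; indices[1] = 3; indices[2] = 9; indices[3] = 12;
        ISLocalToGlobalMappingCreateNC(PETSC_COMM_WORLD,4,indices,&ltog);
*/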
190: /*@C
191: ISLocalToGlobalMappingBlock - Creates a blocked index version of an
192: ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
193: and VecSetLocalToGlobalMappingBlock().
195: Not Collective, but communicator may have more than one process
197: Input Parameters:
198: + inmap - original point-wise mapping
199: - bs - block size
201: Output Parameter:
202: . outmap - block based mapping
204: Level: advanced
206: Concepts: mapping^local to global
208: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
209: @*/
210: int ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
211: {
212: int ierr,*ii,i,n;
216: if (bs > 1) {
217: n = inmap->n/bs;
218: PetscMalloc(n*sizeof(int),&ii);
219: for (i=0; i<n; i++) {
220: ii[i] = inmap->indices[bs*i]/bs;
221: }
222: ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);
223: PetscFree(ii);
224: } else {
225: *outmap = inmap;
226: ierr = PetscObjectReference((PetscObject)inmap);
227: }
228: return(0);
229: }
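/*
    Illustrative sketch (assumed block size 2): for an existing point-wise mapping ltog
    whose indices are listed block by block, e.g. {10,11,20,21}, the code above produces
    the block mapping {5,10}. Error checking is omitted.

        ISLocalToGlobalMapping bltog;

        ISLocalToGlobalMappingBlock(ltog,2,&bltog);
        ISLocalToGlobalMappingDestroy(bltog);
*/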
230:
231: /*@
232: ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
233: ordering and a global parallel ordering.
235:    Not Collective
237:    Input Parameter:
238: . mapping - mapping data structure
240: Level: advanced
242: .seealso: ISLocalToGlobalMappingCreate()
243: @*/
244: int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
245: {
249: if (--mapping->refct > 0) return(0);
250: if (mapping->refct < 0) {
251: SETERRQ(1,"Mapping already destroyed");
252: }
254: PetscFree(mapping->indices);
255: if (mapping->globals) {PetscFree(mapping->globals);}
256: PetscLogObjectDestroy(mapping);
257: PetscHeaderDestroy(mapping);
258: return(0);
259: }
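/*
    Illustrative sketch: the destroy routine is reference counted, so for an existing
    mapping ltog, the aliased mapping returned by ISLocalToGlobalMappingBlock() with
    bs == 1 (which, as shown above, merely references its input) still needs a matching
    destroy call of its own.

        ISLocalToGlobalMapping bltog;

        ISLocalToGlobalMappingBlock(ltog,1,&bltog);
        ISLocalToGlobalMappingDestroy(bltog);
        ISLocalToGlobalMappingDestroy(ltog);
*/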
260:
261: /*@
262: ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
263: a new index set using the global numbering defined in an ISLocalToGlobalMapping
264: context.
266: Not collective
268: Input Parameters:
269: + mapping - mapping between local and global numbering
270: - is - index set in local numbering
272: Output Parameters:
273: . newis - index set in global numbering
275: Level: advanced
277: Concepts: mapping^local to global
279: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
280: ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
281: @*/
282: int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
283: {
284: int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
291: ierr = ISGetLocalSize(is,&n);
292: ierr = ISGetIndices(is,&idxin);
293: idxmap = mapping->indices;
294:
295: PetscMalloc((n+1)*sizeof(int),&idxout);
296: for (i=0; i<n; i++) {
297: if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
298: idxout[i] = idxmap[idxin[i]];
299: }
300: ISRestoreIndices(is,&idxin);
301: ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);
302: PetscFree(idxout);
303: return(0);
304: }
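/*
    Illustrative sketch (assumed values): converting an index set from the local to the
    global numbering. With the mapping {0,3,9,12} from the earlier sketch, the local
    indices {1,3} become the global indices {3,12}. Error checking is omitted.

        IS  islocal,isglobal;
        int loc[] = {1,3};

        ISCreateGeneral(PETSC_COMM_SELF,2,loc,&islocal);
        ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);
        ISDestroy(islocal);
*/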
306: /*MC
307: ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
308: and converts them to the global numbering.
310: Not collective
312: Input Parameters:
313: + mapping - the local to global mapping context
314: . N - number of integers
315: - in - input indices in local numbering
317: Output Parameter:
318: . out - indices in global numbering
320: Synopsis:
321: int ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
323: Notes:
324: The in and out array parameters may be identical.
326: Level: advanced
328: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
329: ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
330: AOPetscToApplication(), ISGlobalToLocalMappingApply()
332: Concepts: mapping^local to global
334: M*/
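/*
    Illustrative sketch: converting a small array of local indices in place, which the
    note above allows since the input and output arrays may be identical. With the
    mapping {0,3,9,12} from the earlier sketch, {0,2} becomes {0,9}.

        int idx[] = {0,2};

        ISLocalToGlobalMappingApply(ltog,2,idx,idx);
*/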
336: /* -----------------------------------------------------------------------------------------*/
338: /*
339: Creates the global fields in the ISLocalToGlobalMapping structure
340: */
341: static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
342: {
343: int ierr,i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
346: end = 0;
347: start = 100000000;
349: for (i=0; i<n; i++) {
350: if (idx[i] < 0) continue;
351: if (idx[i] < start) start = idx[i];
352: if (idx[i] > end) end = idx[i];
353: }
354: if (start > end) {start = 0; end = -1;}
355: mapping->globalstart = start;
356: mapping->globalend = end;
358: ierr = PetscMalloc((end-start+2)*sizeof(int),&globals);
359: mapping->globals = globals;
360: for (i=0; i<end-start+1; i++) {
361: globals[i] = -1;
362: }
363: for (i=0; i<n; i++) {
364: if (idx[i] < 0) continue;
365: globals[idx[i] - start] = i;
366: }
368: PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
369: return(0);
370: }
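/*
    Worked example of the table built above (illustrative): for the local-to-global
    indices {5,2,9}, globalstart = 2 and globalend = 9, so globals[] holds 8 usable
    entries indexed by (global index - 2); entries 0, 3 and 7 receive the local indices
    1, 0 and 2, and every other entry stays -1, meaning that global index has no local
    value on this processor.
*/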
372: /*@
373: ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
374: specified with a global numbering.
376: Not collective
378: Input Parameters:
379: + mapping - mapping between local and global numbering
380: . type - IS_GTOLM_MASK - replaces global indices with no local value with -1
381: IS_GTOLM_DROP - drops the indices with no local value from the output list
382: . n - number of global indices to map
383: - idx - global indices to map
385: Output Parameters:
386: + nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
387: - idxout - local index of each global index; one must pass in an array long enough
388: to hold all the indices. You can call ISGlobalToLocalMappingApply() with
389: idxout == PETSC_NULL to determine the required length (returned in nout)
390: and then allocate the required space and call ISGlobalToLocalMappingApply()
391: a second time to set the values.
393: Notes:
394: Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
396: This is not scalable in memory usage. Each processor requires O(Nglobal) size
397: array to compute these.
399: Level: advanced
401: Concepts: mapping^global to local
403: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
404: ISLocalToGlobalMappingDestroy()
405: @*/
406: int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
407: int n,const int idx[],int *nout,int idxout[])
408: {
409: int i,ierr,*globals,nf = 0,tmp,start,end;
412: if (!mapping->globals) {
413: ISGlobalToLocalMappingSetUp_Private(mapping);
414: }
415: globals = mapping->globals;
416: start = mapping->globalstart;
417: end = mapping->globalend;
419: if (type == IS_GTOLM_MASK) {
420: if (idxout) {
421: for (i=0; i<n; i++) {
422: if (idx[i] < 0) idxout[i] = idx[i];
423: else if (idx[i] < start) idxout[i] = -1;
424: else if (idx[i] > end) idxout[i] = -1;
425: else idxout[i] = globals[idx[i] - start];
426: }
427: }
428: if (nout) *nout = n;
429: } else {
430: if (idxout) {
431: for (i=0; i<n; i++) {
432: if (idx[i] < 0) continue;
433: if (idx[i] < start) continue;
434: if (idx[i] > end) continue;
435: tmp = globals[idx[i] - start];
436: if (tmp < 0) continue;
437: idxout[nf++] = tmp;
438: }
439: } else {
440: for (i=0; i<n; i++) {
441: if (idx[i] < 0) continue;
442: if (idx[i] < start) continue;
443: if (idx[i] > end) continue;
444: tmp = globals[idx[i] - start];
445: if (tmp < 0) continue;
446: nf++;
447: }
448: }
449: if (nout) *nout = nf;
450: }
452: return(0);
453: }
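/*
    Illustrative sketch of the two-pass pattern described in the manual page above:
    first call with idxout == PETSC_NULL to learn how many of the global indices have a
    local value, then allocate and call again with IS_GTOLM_DROP. The global indices
    are assumed values and error checking is omitted.

        int nout,*local,globals[] = {3,12,17};

        ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,globals,&nout,PETSC_NULL);
        PetscMalloc((nout+1)*sizeof(int),&local);
        ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,globals,PETSC_NULL,local);
        PetscFree(local);
*/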
455: /*@C
456: ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
457: each index shared by more than one processor
459: Collective on ISLocalToGlobalMapping
461: Input Parameters:
462: . mapping - the mapping from local to global indexing
464:    Output Parameters:
465: +  nproc - number of processors that are connected to this one
466: .  procs - neighboring processors
467: .  numprocs - number of indices for each subdomain (processor)
468: -  indices - indices of local nodes shared with neighbor (sorted by global numbering)
470: Level: advanced
472: Concepts: mapping^local to global
474: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
475: ISLocalToGlobalMappingRestoreInfo()
476: @*/
477: int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
478: {
479: int i,n = mapping->n,ierr,Ng,ng,max = 0,*lindices = mapping->indices;
480: int size,rank,*nprocs,*owner,nsends,*sends,j,*starts,*work,nmax,nrecvs,*recvs,proc;
481: int tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
482: int node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
483: int first_procs,first_numprocs,*first_indices;
484: MPI_Request *recv_waits,*send_waits;
485: MPI_Status recv_status,*send_status,*recv_statuses;
486: MPI_Comm comm = mapping->comm;
487: PetscTruth debug = PETSC_FALSE;
490: ierr = MPI_Comm_size(comm,&size);
491: ierr = MPI_Comm_rank(comm,&rank);
492: if (size == 1) {
493: *nproc = 0;
494: *procs = PETSC_NULL;
495: ierr = PetscMalloc(sizeof(int),numprocs);
496: (*numprocs)[0] = 0;
497: ierr = PetscMalloc(sizeof(int*),indices);
498: (*indices)[0] = PETSC_NULL;
499: return(0);
500: }
502: PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);
504: /*
505: Notes on ISLocalToGlobalMappingGetInfo
507: globally owned node - the nodes that have been assigned to this processor in global
508: numbering, just for this routine.
510:        nontrivial globally owned node - node assigned to this processor that is on a subdomain
511:            boundary (i.e. it has more than one local owner)
513:        locally owned node - node that exists on this processor's subdomain
515:        nontrivial locally owned node - node that is not in the interior (i.e. belongs to more than one
516:            local subdomain)
517: */
518: PetscObjectGetNewTag((PetscObject)mapping,&tag1);
519: PetscObjectGetNewTag((PetscObject)mapping,&tag2);
520: PetscObjectGetNewTag((PetscObject)mapping,&tag3);
522: for (i=0; i<n; i++) {
523: if (lindices[i] > max) max = lindices[i];
524: }
525: ierr = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);
526: Ng++;
527: ierr = MPI_Comm_size(comm,&size);
528: ierr = MPI_Comm_rank(comm,&rank);
529: scale = Ng/size + 1;
530: ng = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
531: rstart = scale*rank;
533: /* determine ownership ranges of global indices */
534: PetscMalloc((2*size+1)*sizeof(int),&nprocs);
535: PetscMemzero(nprocs,2*size*sizeof(int));
537: /* determine owners of each local node */
538: PetscMalloc((n+1)*sizeof(int),&owner);
539: for (i=0; i<n; i++) {
540: proc = lindices[i]/scale; /* processor that globally owns this index */
541: nprocs[size+proc] = 1; /* processor globally owns at least one of ours */
542: owner[i] = proc;
543: nprocs[proc]++; /* count of how many that processor globally owns of ours */
544: }
545: nsends = 0; for (i=0; i<size; i++) nsends += nprocs[size + i];
546:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);
548: /* inform other processors of number of messages and max length*/
549: PetscMalloc(2*size*sizeof(int),&work);
550: ierr = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);
551: nmax = work[rank];
552: nrecvs = work[size+rank];
553: ierr = PetscFree(work);
554:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);
556: /* post receives for owned rows */
557: PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);
558: PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
559: for (i=0; i<nrecvs; i++) {
560: MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
561: }
563: /* pack messages containing lists of local nodes to owners */
564: ierr = PetscMalloc((2*n+1)*sizeof(int),&sends);
565: ierr = PetscMalloc((size+1)*sizeof(int),&starts);
566: starts[0] = 0;
567: for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
568: for (i=0; i<n; i++) {
569: sends[starts[owner[i]]++] = lindices[i];
570: sends[starts[owner[i]]++] = i;
571: }
572: PetscFree(owner);
573: starts[0] = 0;
574: for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
576: /* send the messages */
577: PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
578: PetscMalloc((nsends+1)*sizeof(int),&dest);
579: cnt = 0;
580: for (i=0; i<size; i++) {
581: if (nprocs[i]) {
582: ierr = MPI_Isend(sends+starts[i],2*nprocs[i],MPI_INT,i,tag1,comm,send_waits+cnt);
583: dest[cnt] = i;
584: cnt++;
585: }
586: }
587: PetscFree(starts);
589: /* wait on receives */
590: PetscMalloc((2*nrecvs+1)*sizeof(int),&source);
591: len = source + nrecvs;
592: cnt = nrecvs;
593: PetscMalloc((ng+1)*sizeof(int),&nownedsenders);
594: PetscMemzero(nownedsenders,ng*sizeof(int));
595: while (cnt) {
596: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
597: /* unpack receives into our local space */
598: ierr = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);
599: source[imdex] = recv_status.MPI_SOURCE;
600: len[imdex] = len[imdex]/2;
601: /* count how many local owners for each of my global owned indices */
602: for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
603: cnt--;
604: }
605: PetscFree(recv_waits);
607: /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
608: nowned = 0;
609: nownedm = 0;
610: for (i=0; i<ng; i++) {
611: if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
612: }
614: /* create single array to contain rank of all local owners of each globally owned index */
615: ierr = PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);
616: ierr = PetscMalloc((ng+1)*sizeof(int),&starts);
617: starts[0] = 0;
618: for (i=1; i<ng; i++) {
619: if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
620: else starts[i] = starts[i-1];
621: }
623:   /* for each nontrivial globally owned node list all arriving processors */
624: for (i=0; i<nrecvs; i++) {
625: for (j=0; j<len[i]; j++) {
626: node = recvs[2*i*nmax+2*j]-rstart;
627: if (nownedsenders[node] > 1) {
628: ownedsenders[starts[node]++] = source[i];
629: }
630: }
631: }
633: if (debug) { /* ----------------------------------- */
634: starts[0] = 0;
635: for (i=1; i<ng; i++) {
636: if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
637: else starts[i] = starts[i-1];
638: }
639: for (i=0; i<ng; i++) {
640: if (nownedsenders[i] > 1) {
641: PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
642: for (j=0; j<nownedsenders[i]; j++) {
643: PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
644: }
645:         PetscSynchronizedPrintf(comm,"\n");
646: }
647: }
648: PetscSynchronizedFlush(comm);
649: }/* ----------------------------------- */
651: /* wait on original sends */
652: if (nsends) {
653: PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
654: MPI_Waitall(nsends,send_waits,send_status);
655: PetscFree(send_status);
656: }
657: PetscFree(send_waits);
658: PetscFree(sends);
659: PetscFree(nprocs);
661: /* pack messages to send back to local owners */
662: starts[0] = 0;
663: for (i=1; i<ng; i++) {
664: if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
665: else starts[i] = starts[i-1];
666: }
667: nsends2 = nrecvs;
668: ierr = PetscMalloc((nsends2+1)*sizeof(int),&nprocs); /* length of each message */
669: for (i=0; i<nrecvs; i++) {
670: nprocs[i] = 1;
671: for (j=0; j<len[i]; j++) {
672: node = recvs[2*i*nmax+2*j]-rstart;
673: if (nownedsenders[node] > 1) {
674: nprocs[i] += 2 + nownedsenders[node];
675: }
676: }
677: }
678: nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
679: PetscMalloc((nt+1)*sizeof(int),&sends2);
680: PetscMalloc((nsends2+1)*sizeof(int),&starts2);
681: starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
682:   /*
683:      Each message is nprocs[i] entries long and consists of
684:         (0) the number of nodes being sent back, followed, for each such node, by
685:         (1) the local node number,
686:         (2) the number of processors sharing it, and
687:         (3) the ranks of the processors sharing it
688:   */
689: for (i=0; i<nsends2; i++) {
690: cnt = 1;
691: sends2[starts2[i]] = 0;
692: for (j=0; j<len[i]; j++) {
693: node = recvs[2*i*nmax+2*j]-rstart;
694: if (nownedsenders[node] > 1) {
695: sends2[starts2[i]]++;
696: sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
697: sends2[starts2[i]+cnt++] = nownedsenders[node];
698: PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));
699: cnt += nownedsenders[node];
700: }
701: }
702: }
704: /* send the message lengths */
705: for (i=0; i<nsends2; i++) {
706: MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);
707: }
709: /* receive the message lengths */
710: nrecvs2 = nsends;
711: PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);
712: PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);
713: nt = 0;
714: for (i=0; i<nrecvs2; i++) {
715: MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);
716: nt += lens2[i];
717: }
718: starts3[0] = 0;
719: for (i=0; i<nrecvs2-1; i++) {
720: starts3[i+1] = starts3[i] + lens2[i];
721: }
722: PetscMalloc((nt+1)*sizeof(int),&recvs2);
723: PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
724: for (i=0; i<nrecvs2; i++) {
725: MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);
726: }
727:
728: /* send the messages */
729: PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
730: for (i=0; i<nsends2; i++) {
731: MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);
732: }
734: /* wait on receives */
735: PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);
736: MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
737: PetscFree(recv_statuses);
738: PetscFree(recv_waits);
739: PetscFree(nprocs);
741: if (debug) { /* ----------------------------------- */
742: cnt = 0;
743: for (i=0; i<nrecvs2; i++) {
744: nt = recvs2[cnt++];
745: for (j=0; j<nt; j++) {
746: PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
747: for (k=0; k<recvs2[cnt+1]; k++) {
748: PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
749: }
750: cnt += 2 + recvs2[cnt+1];
751:       PetscSynchronizedPrintf(comm,"\n");
752: }
753: }
754: PetscSynchronizedFlush(comm);
755: } /* ----------------------------------- */
757:   /* count how many of the nontrivial local nodes are shared with each subdomain */
758: PetscMalloc(size*sizeof(int),&nprocs);
759: PetscMemzero(nprocs,size*sizeof(int));
760: cnt = 0;
761: for (i=0; i<nrecvs2; i++) {
762: nt = recvs2[cnt++];
763: for (j=0; j<nt; j++) {
764: for (k=0; k<recvs2[cnt+1]; k++) {
765: nprocs[recvs2[cnt+2+k]]++;
766: }
767: cnt += 2 + recvs2[cnt+1];
768: }
769: }
770: nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
771: *nproc = nt;
772: PetscMalloc((nt+1)*sizeof(int),procs);
773: PetscMalloc((nt+1)*sizeof(int),numprocs);
774: PetscMalloc((nt+1)*sizeof(int*),indices);
775: PetscMalloc(size*sizeof(int),&bprocs);
776: cnt = 0;
777: for (i=0; i<size; i++) {
778: if (nprocs[i] > 0) {
779: bprocs[i] = cnt;
780: (*procs)[cnt] = i;
781: (*numprocs)[cnt] = nprocs[i];
782: ierr = PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);
783: cnt++;
784: }
785: }
787: /* make the list of subdomains for each nontrivial local node */
788: PetscMemzero(*numprocs,nt*sizeof(int));
789: cnt = 0;
790: for (i=0; i<nrecvs2; i++) {
791: nt = recvs2[cnt++];
792: for (j=0; j<nt; j++) {
793: for (k=0; k<recvs2[cnt+1]; k++) {
794: (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
795: }
796: cnt += 2 + recvs2[cnt+1];
797: }
798: }
799: PetscFree(bprocs);
800: PetscFree(recvs2);
802:   /* sort the node indices by their global numbers */
803: nt = *nproc;
804: for (i=0; i<nt; i++) {
805: PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);
806: for (j=0; j<(*numprocs)[i]; j++) {
807: tmp[j] = lindices[(*indices)[i][j]];
808: }
809: PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
810: PetscFree(tmp);
811: }
813: if (debug) { /* ----------------------------------- */
814: nt = *nproc;
815: for (i=0; i<nt; i++) {
816: PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
817: for (j=0; j<(*numprocs)[i]; j++) {
818: PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
819: }
820:       PetscSynchronizedPrintf(comm,"\n");
821: }
822: PetscSynchronizedFlush(comm);
823: } /* ----------------------------------- */
825: /* wait on sends */
826: if (nsends2) {
827: PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
828: MPI_Waitall(nsends2,send_waits,send_status);
829: PetscFree(send_status);
830: }
832: PetscFree(starts3);
833: PetscFree(dest);
834: PetscFree(send_waits);
836: PetscFree(nownedsenders);
837: PetscFree(ownedsenders);
838: PetscFree(starts);
839: PetscFree(starts2);
840: PetscFree(lens2);
842: PetscFree(source);
843: PetscFree(recvs);
844: PetscFree(nprocs);
845: PetscFree(sends2);
847: /* put the information about myself as the first entry in the list */
848: first_procs = (*procs)[0];
849: first_numprocs = (*numprocs)[0];
850: first_indices = (*indices)[0];
851: for (i=0; i<*nproc; i++) {
852: if ((*procs)[i] == rank) {
853: (*procs)[0] = (*procs)[i];
854: (*numprocs)[0] = (*numprocs)[i];
855: (*indices)[0] = (*indices)[i];
856: (*procs)[i] = first_procs;
857: (*numprocs)[i] = first_numprocs;
858: (*indices)[i] = first_indices;
859: break;
860: }
861: }
863: return(0);
864: }
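/*
    Illustrative sketch: retrieving and then returning the neighbor information for an
    existing mapping ltog; for each i < nproc, indices[i][0..numprocs[i]-1] are the
    local nodes shared with processor procs[i]. The arrays must be handed back with
    ISLocalToGlobalMappingRestoreInfo(), which frees the memory allocated here. Error
    checking is omitted.

        int nproc,*procs,*numprocs,**indices;

        ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);
        ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);
*/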
866: /*@C
867: ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
869: Collective on ISLocalToGlobalMapping
871: Input Parameters:
872: . mapping - the mapping from local to global indexing
874:    Output Parameters:
875: +  nproc - number of processors that are connected to this one
876: .  procs - neighboring processors
877: .  numprocs - number of indices for each processor
878: -  indices - indices of local nodes shared with neighbor (sorted by global numbering)
880: Level: advanced
882: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
883: ISLocalToGlobalMappingGetInfo()
884: @*/
885: int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
886: {
887: int ierr,i;
890: if (*procs) {PetscFree(*procs);}
891: if (*numprocs) {PetscFree(*numprocs);}
892: if (*indices) {
893: if ((*indices)[0]) {PetscFree((*indices)[0]);}
894: for (i=1; i<*nproc; i++) {
895: if ((*indices)[i]) {PetscFree((*indices)[i]);}
896: }
897: PetscFree(*indices);
898: }
899: return(0);
900: }