Actual source code: ghost.c

#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: ghost.c,v 1.3 2000/01/10 03:27:01 knepley Exp $";
#endif

#include "petscsys.h"

/*@C
  PetscGhostExchange - This function transfers data between local and ghost storage without a predefined mapping.

  Collective on MPI_Comm

  Input Parameters:
+ comm         - The communicator
. numGhosts    - The number of ghosts in this domain
. ghostProcs   - The processor from which to obtain each ghost
. ghostIndices - The global index for each ghost
. dataType     - The type of the variables
. firstVar     - The first variable on each processor (an array of size numProcs+1)
. addv         - The insert mode, INSERT_VALUES or ADD_VALUES
- mode         - The direction of the transfer, SCATTER_FORWARD or SCATTER_REVERSE

  Output Parameters:
+ locVars      - The local variable array
- ghostVars    - The ghost variables

  Note:
  The data in ghostVars is assumed contiguous and implicitly indexed by the order of
  ghostProcs and ghostIndices. The SCATTER_FORWARD mode will take the requested data
  from locVars and copy it to ghostVars in the order specified by ghostIndices. The
  SCATTER_REVERSE mode will take data from ghostVars and copy it to locVars. A usage
  sketch appears after the routine.

  Level: advanced

.keywords: ghost, exchange
.seealso: GridGlobalToLocal(), GridLocalToGlobal()
@*/
int PetscGhostExchange(MPI_Comm comm, int numGhosts, int *ghostProcs, int *ghostIndices, PetscDataType dataType,
                       int *firstVar, InsertMode addv, ScatterMode mode, void *locVars, void *ghostVars)
{
  int         *numSendGhosts;            /* The number of ghosts from each domain */
  int         *numRecvGhosts;            /* The number of local variables which are ghosts in each domain */
  int         *sumSendGhosts;            /* The prefix sums of numSendGhosts */
  int         *sumRecvGhosts;            /* The prefix sums of numRecvGhosts */
  int         *offsets;                  /* The offset into the send array for each domain */
  int          totSendGhosts;            /* The number of ghosts to request variables for */
  int          totRecvGhosts;            /* The number of ghosts of other domains to provide variables for */
  int         *sendIndices = PETSC_NULL; /* The canonical indices of ghosts in this domain */
  int         *recvIndices = PETSC_NULL; /* The canonical indices of ghosts to return variables for */
  char        *tempVars    = PETSC_NULL; /* The variables of the requested or submitted ghosts */
  char        *locBytes    = (char *) locVars;
  MPI_Datatype MPIType;
  int          typeSize;
#ifdef PETSC_USE_BOPT_g
  int          numLocVars;
#endif
  int          numProcs, rank;
  int          proc, ghost, locIndex, byte;
  int          ierr;

  /* Initialize communication */
  MPI_Comm_size(comm, &numProcs);
  MPI_Comm_rank(comm, &rank);
  PetscMalloc(numProcs * sizeof(int), &numSendGhosts);
  PetscMalloc(numProcs * sizeof(int), &numRecvGhosts);
  PetscMalloc(numProcs * sizeof(int), &sumSendGhosts);
  PetscMalloc(numProcs * sizeof(int), &sumRecvGhosts);
  PetscMalloc(numProcs * sizeof(int), &offsets);
  PetscMemzero(numSendGhosts,  numProcs * sizeof(int));
  PetscMemzero(numRecvGhosts,  numProcs * sizeof(int));
  PetscMemzero(sumSendGhosts,  numProcs * sizeof(int));
  PetscMemzero(sumRecvGhosts,  numProcs * sizeof(int));
  PetscMemzero(offsets,        numProcs * sizeof(int));
#ifdef PETSC_USE_BOPT_g
  numLocVars = firstVar[rank+1] - firstVar[rank];
#endif

  /* Get number of ghosts needed from each processor */
  for(ghost = 0; ghost < numGhosts; ghost++) {
    numSendGhosts[ghostProcs[ghost]]++;
  }

  /* Get number of ghosts to provide variables for */
  MPI_Alltoall(numSendGhosts, 1, MPI_INT, numRecvGhosts, 1, MPI_INT, comm);
  for(proc = 1; proc < numProcs; proc++) {
    sumSendGhosts[proc] = sumSendGhosts[proc-1] + numSendGhosts[proc-1];
    sumRecvGhosts[proc] = sumRecvGhosts[proc-1] + numRecvGhosts[proc-1];
    offsets[proc]       = sumSendGhosts[proc];
  }
  totSendGhosts = sumSendGhosts[numProcs-1] + numSendGhosts[numProcs-1];
  totRecvGhosts = sumRecvGhosts[numProcs-1] + numRecvGhosts[numProcs-1];
  if (numGhosts != totSendGhosts) {
    SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghosts %d in send, should be %d", totSendGhosts, numGhosts);
  }

  PetscDataTypeGetSize(dataType, &typeSize);
  if (totSendGhosts) {
    PetscMalloc(totSendGhosts * sizeof(int), &sendIndices);
  }
  if (totRecvGhosts) {
    PetscMalloc(totRecvGhosts * sizeof(int), &recvIndices);
    PetscMalloc(totRecvGhosts * typeSize,    &tempVars);
    /* tempVars is accumulated into below for ADD_VALUES, so it must start zeroed */
    PetscMemzero(tempVars, totRecvGhosts * typeSize);
  }

  /* Must order ghosts by processor; offsets[] is used as a write cursor and is consumed here */
  for(ghost = 0; ghost < numGhosts; ghost++) {
    sendIndices[offsets[ghostProcs[ghost]]++] = ghostIndices[ghost];
  }

  /* Get canonical indices of ghosts to provide variables for */
  MPI_Alltoallv(sendIndices, numSendGhosts, sumSendGhosts, MPI_INT,
                recvIndices, numRecvGhosts, sumRecvGhosts, MPI_INT, comm);

  switch(mode) {
  case SCATTER_FORWARD:
    /* Gather the requested local variables into tempVars */
    if (addv == INSERT_VALUES) {
      for(ghost = 0; ghost < totRecvGhosts; ghost++) {
        locIndex = recvIndices[ghost] - firstVar[rank];
#ifdef PETSC_USE_BOPT_g
        if ((locIndex < 0) || (locIndex >= numLocVars)) {
          SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Invalid ghost index %d, not in [0,%d)", locIndex, numLocVars);
        }
#endif
        for(byte = 0; byte < typeSize; byte++) {
          tempVars[ghost*typeSize+byte] = locBytes[locIndex*typeSize+byte];
        }
      }
    } else {
      /* Byte-wise accumulation into the zeroed tempVars; exact only for single-byte types */
      for(ghost = 0; ghost < totRecvGhosts; ghost++) {
        locIndex = recvIndices[ghost] - firstVar[rank];
#ifdef PETSC_USE_BOPT_g
        if ((locIndex < 0) || (locIndex >= numLocVars)) {
          SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Invalid ghost index %d, not in [0,%d)", locIndex, numLocVars);
        }
#endif
        for(byte = 0; byte < typeSize; byte++) {
          tempVars[ghost*typeSize+byte] += locBytes[locIndex*typeSize+byte];
        }
      }
    }

    /* Communicate local variables to ghost storage */
    PetscDataTypeToMPIDataType(dataType, &MPIType);
    MPI_Alltoallv(tempVars,  numRecvGhosts, sumRecvGhosts, MPIType,
                  ghostVars, numSendGhosts, sumSendGhosts, MPIType, comm);
    break;
  case SCATTER_REVERSE:
    /* Communicate ghost variables to local storage */
    PetscDataTypeToMPIDataType(dataType, &MPIType);
    MPI_Alltoallv(ghostVars, numSendGhosts, sumSendGhosts, MPIType,
                  tempVars,  numRecvGhosts, sumRecvGhosts, MPIType, comm);

    /* Scatter the received ghost values into the local variable array */
    if (addv == INSERT_VALUES) {
      for(ghost = 0; ghost < totRecvGhosts; ghost++) {
        locIndex = recvIndices[ghost] - firstVar[rank];
#ifdef PETSC_USE_BOPT_g
        if ((locIndex < 0) || (locIndex >= numLocVars)) {
          SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Invalid ghost index %d, not in [0,%d)", locIndex, numLocVars);
        }
#endif
        for(byte = 0; byte < typeSize; byte++) {
          locBytes[locIndex*typeSize+byte] = tempVars[ghost*typeSize+byte];
        }
      }
    } else {
      /* There must be a better way to do this -- Ask Bill */
      if (typeSize == sizeof(int)) {
        int *tempInt = (int *) tempVars;
        int *locInt  = (int *) locVars;

        for(ghost = 0; ghost < totRecvGhosts; ghost++) {
          locIndex = recvIndices[ghost] - firstVar[rank];
#ifdef PETSC_USE_BOPT_g
          if ((locIndex < 0) || (locIndex >= numLocVars)) {
            SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Invalid ghost index %d, not in [0,%d)", locIndex, numLocVars);
          }
#endif
          locInt[locIndex] += tempInt[ghost];
        }
      } else if (typeSize == sizeof(long int)) {
        long int *tempLongInt = (long int *) tempVars;
        long int *locLongInt  = (long int *) locVars;

        for(ghost = 0; ghost < totRecvGhosts; ghost++) {
          locIndex = recvIndices[ghost] - firstVar[rank];
#ifdef PETSC_USE_BOPT_g
          if ((locIndex < 0) || (locIndex >= numLocVars)) {
            SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Invalid ghost index %d, not in [0,%d)", locIndex, numLocVars);
          }
#endif
          locLongInt[locIndex] += tempLongInt[ghost];
        }
      } else {
        /* Fallback: byte-wise addition, exact only for single-byte types */
        for(ghost = 0; ghost < totRecvGhosts; ghost++) {
          locIndex = recvIndices[ghost] - firstVar[rank];
#ifdef PETSC_USE_BOPT_g
          if ((locIndex < 0) || (locIndex >= numLocVars)) {
            SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Invalid ghost index %d, not in [0,%d)", locIndex, numLocVars);
          }
#endif
          for(byte = 0; byte < typeSize; byte++) {
            locBytes[locIndex*typeSize+byte] += tempVars[ghost*typeSize+byte];
          }
        }
      }
    }
    break;
  default:
    SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid scatter mode %d", mode);
  }

  /* Cleanup */
  PetscFree(numSendGhosts);
  PetscFree(numRecvGhosts);
  PetscFree(sumSendGhosts);
  PetscFree(sumRecvGhosts);
  PetscFree(offsets);
  if (totSendGhosts) {
    PetscFree(sendIndices);
  }
  if (totRecvGhosts) {
    PetscFree(recvIndices);
    PetscFree(tempVars);
  }
  return(0);
}
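
Usage sketch (not part of ghost.c): a minimal, hedged illustration of calling PetscGhostExchange() with SCATTER_FORWARD. The helper ExampleGhostPull() and its data layout (two double-precision variables per process, one ghost pulled from the next rank) are hypothetical and exist only to show what the arguments mean; error checking is omitted for brevity.

/* Hypothetical example: each process owns two doubles and pulls one ghost
   value from the next process in the communicator. */
static int ExampleGhostPull(MPI_Comm comm)
{
  int    numProcs, rank, proc, ierr;
  int    numGhosts, ghostProcs[1], ghostIndices[1];
  int   *firstVar;
  double locVars[2], ghostVars[1];

  MPI_Comm_size(comm, &numProcs);
  MPI_Comm_rank(comm, &rank);

  /* Global variables are numbered so that process p owns indices 2p and 2p+1 */
  PetscMalloc((numProcs+1) * sizeof(int), &firstVar);
  for(proc = 0; proc <= numProcs; proc++) firstVar[proc] = 2*proc;
  locVars[0] = 10.0*rank;
  locVars[1] = 10.0*rank + 1.0;

  /* Request one ghost: the first variable owned by the next process */
  numGhosts       = 1;
  ghostProcs[0]   = (rank+1) % numProcs;
  ghostIndices[0] = firstVar[ghostProcs[0]];

  /* SCATTER_FORWARD copies the owner's value of ghostIndices[0] into ghostVars[0] */
  ierr = PetscGhostExchange(comm, numGhosts, ghostProcs, ghostIndices, PETSC_DOUBLE,
                            firstVar, INSERT_VALUES, SCATTER_FORWARD, locVars, ghostVars);

  PetscFree(firstVar);
  return(ierr);
}

With SCATTER_REVERSE and ADD_VALUES the same argument list would instead accumulate each entry of ghostVars back into the owning process's locVars, which is the usual pattern for summing ghost contributions after a local assembly step.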