Actual source code: dasub.c
#define PETSCDM_DLL

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include "src/dm/da/daimpl.h"
/*@C
   DAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DA

   Input Parameters:
+  da  - the distributed array
.  dir - Cartesian direction, either DA_X, DA_Y, or DA_Z
-  gp  - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DA must call this routine with the same gp value.

   This routine is particularly useful for computing boundary conditions, or for
   other application-specific calculations that require manipulating sets of data
   throughout a logical plane of grid points (see the usage sketch appended after
   the function definition below).

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DAGetProcessorSubset(DA da,DADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = PETSC_NULL;

  PetscFunctionBegin;
  flag = 0;
  ierr = DAGetCorners(da,&xs,&xm,&ys,&ym,&zs,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(da->comm,&size);CHKERRQ(ierr);

  /* Determine whether this process owns part of the requested plane (3D) or line (2D) */
  if (dir == DA_Z) {
    if (da->dim < 3) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Z invalid for DA dim < 3");
    if (gp < 0 || gp > da->P) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DA_Y) {
    if (da->dim == 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Y invalid for DA dim = 1");
    if (gp < 0 || gp > da->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DA_X) {
    if (gp < 0 || gp > da->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");
  /* Gather each process's ownership flag, then collect the ranks that own the plane */
  ierr = PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,da->comm);CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  /* Create the subcommunicator containing exactly those ranks */
  ierr = MPI_Comm_group(da->comm,&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(da->comm,subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);   /* the groups are no longer needed once the communicator exists */
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
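
/*
   Usage sketch -- not part of the original dasub.c; added purely to illustrate
   how DAGetProcessorSubset() is typically used.  It obtains the subcommunicator
   for the plane z = zp of a 3D DA and averages a locally computed value over
   the processes that own that plane.  The helper name ExampleAverageOverPlane
   and the local value lval are assumptions made for this sketch; the PETSc and
   MPI calls themselves are standard.
*/
static PetscErrorCode ExampleAverageOverPlane(DA da,PetscInt zp,double lval,double *avg)
{
  MPI_Comm       pcomm;
  PetscMPIInt    psize;
  double         sum;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Collective on the DA: every process of the DA must make this call with the same zp */
  ierr = DAGetProcessorSubset(da,DA_Z,zp,&pcomm);CHKERRQ(ierr);
  if (pcomm != MPI_COMM_NULL) {
    /* Only the processes that own part of the plane receive a valid communicator */
    ierr = MPI_Comm_size(pcomm,&psize);CHKERRQ(ierr);
    ierr = MPI_Allreduce(&lval,&sum,1,MPI_DOUBLE,MPI_SUM,pcomm);CHKERRQ(ierr);
    *avg = sum/psize;
    ierr = MPI_Comm_free(&pcomm);CHKERRQ(ierr);   /* the caller owns the new communicator */
  }
  PetscFunctionReturn(0);
}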