Actual source code: dasub.c
/*$Id: dasub.c,v 1.33 2001/03/23 23:25:00 balay Exp $*/

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include "src/dm/da/daimpl.h"
/*@C
   DAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DA_X, DA_Y, or DA_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   This routine is particularly useful for computing boundary conditions
   or performing other application-specific calculations that require
   manipulating sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
int DAGetProcessorSubset(DA da,DADirection dir,int gp,MPI_Comm *comm)
{
  MPI_Group group,subgroup;
  int       ierr,i,ict,flag,size,*ranks,*owners,xs,xm,ys,ym,zs,zm;
  PetscFunctionBegin;
  flag = 0;
  ierr = DAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(da->comm,&size);CHKERRQ(ierr);
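  /* Determine whether this process's local subdomain contains the requested
     grid point: compare gp with the local corner and width in the given direction */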
  if (dir == DA_Z) {
    if (da->dim < 3) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Z invalid for DA dim < 3");
    if (gp < 0 || gp > da->P) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DA_Y) {
    if (da->dim == 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Y invalid for DA dim = 1");
    if (gp < 0 || gp > da->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DA_X) {
    if (gp < 0 || gp > da->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");
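  /* Gather every process's ownership flag so all ranks learn which processes own the plane */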
  ierr  = PetscMalloc(2*size*sizeof(int),&owners);CHKERRQ(ierr);
  ranks = owners + size;
  ierr  = MPI_Allgather(&flag,1,MPI_INT,owners,1,MPI_INT,da->comm);CHKERRQ(ierr);
  ict   = 0;
  PetscLogInfo(da,"DAGetProcessorSubset: dim=%d, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscLogInfo(da,"%d ",i);
    }
  }
  PetscLogInfo(da,"\n");
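  /* Form the subgroup of owning ranks and create the communicator; processes
     outside the subgroup receive MPI_COMM_NULL from MPI_Comm_create() */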
  ierr = MPI_Comm_group(da->comm,&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(da->comm,subgroup,comm);CHKERRQ(ierr);
  ierr = PetscFree(owners);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
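
For reference, a minimal usage sketch (not part of dasub.c): it assumes a DA named da created elsewhere, the public header name petscda.h, and relies on MPI_Comm_create() returning MPI_COMM_NULL on processes outside the subgroup. The helper SumOverGridLine() is hypothetical and only illustrates how the communicator returned by DAGetProcessorSubset() might be used, e.g. to reduce boundary data over the owners of one grid line of a 2d DA.

   #include "petscda.h"    /* assumed public header for the DA interface */

   /* Hypothetical helper: sum a per-process value over only those processes
      that own the global grid line y = gp of a 2d DA. */
   int SumOverGridLine(DA da,int gp,double localval,double *linesum)
   {
     MPI_Comm subcomm;
     int      ierr;

     ierr = DAGetProcessorSubset(da,DA_Y,gp,&subcomm);CHKERRQ(ierr);
     if (subcomm != MPI_COMM_NULL) {   /* non-owners receive MPI_COMM_NULL */
       ierr = MPI_Allreduce(&localval,linesum,1,MPI_DOUBLE,MPI_SUM,subcomm);CHKERRQ(ierr);
       ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
     }
     return 0;
   }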