Actual source code: mpiu.c
/*$Id: mpiu.c,v 1.100 2001/03/23 23:20:45 balay Exp $*/

#include "petsc.h"

int PetscSequentialPhaseBegin_Private(MPI_Comm comm,int ng)
{
  int        lidx,np,tag = 0,ierr;
  MPI_Status status;

  ierr = MPI_Comm_size(comm,&np);CHKERRQ(ierr);
  if (np == 1) return(0);
  ierr = MPI_Comm_rank(comm,&lidx);CHKERRQ(ierr);

  /* Wait for the token from the previous process before proceeding */
  if (lidx != 0) {
    ierr = MPI_Recv(0,0,MPI_INT,lidx-1,tag,comm,&status);CHKERRQ(ierr);
  }
  /* Send to the next process in the group unless we are the last process */
  if ((lidx % ng) < ng - 1 && lidx != np - 1) {
    ierr = MPI_Send(0,0,MPI_INT,lidx+1,tag,comm);CHKERRQ(ierr);
  }
  return(0);
}
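
/*
   Illustration (not part of the original source): with np = 4 and ng = 2
   the handshake above plays out as

      rank 0:  no recv (lidx == 0); 0 % 2 == 0 < 1, so it sends to rank 1
      rank 1:  recvs from rank 0; 1 % 2 == 1 is last in its group, no send
      rank 2:  blocks in MPI_Recv() from rank 1 until rank 1 reaches
               PetscSequentialPhaseEnd_Private() below
      rank 3:  blocks in MPI_Recv() from rank 2

   so ranks 0 and 1 enter the sequential section together, followed by
   ranks 2 and 3: exactly ng processes execute at a time.
*/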
int PetscSequentialPhaseEnd_Private(MPI_Comm comm,int ng)
{
  int        lidx,np,tag = 0,ierr;
  MPI_Status status;

  ierr = MPI_Comm_rank(comm,&lidx);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&np);CHKERRQ(ierr);
  if (np == 1) return(0);

  /* Send to the first process in the next group */
  if ((lidx % ng) == ng - 1 || lidx == np - 1) {
    ierr = MPI_Send(0,0,MPI_INT,(lidx+1) % np,tag,comm);CHKERRQ(ierr);
  }
  /* Process 0 absorbs the wrap-around message from the last process */
  if (!lidx) {
    ierr = MPI_Recv(0,0,MPI_INT,np-1,tag,comm,&status);CHKERRQ(ierr);
  }
  return(0);
}
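
/*
   Taken together, the two private routines pass a zero-length token around
   the ring: Begin chains the ranks inside each group of ng, End hands the
   token from the last rank of one group to the first rank of the next, and
   the wrap-around send from rank np-1 is absorbed by the final MPI_Recv()
   on rank 0, so no message is left pending on the communicator.

   A minimal sketch of driving the pair directly (hypothetical; seq_comm is
   assumed to be a communicator dedicated to this traffic and rank the
   caller's rank in it):

      PetscSequentialPhaseBegin_Private(seq_comm,1);
      printf("[%d] my part of the sequential work\n",rank);
      fflush(stdout);
      PetscSequentialPhaseEnd_Private(seq_comm,1);
*/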
/* ---------------------------------------------------------------------*/
/*
   Petsc_Seq_keyval holds the key for an MPI attribute attached to a
   communicator; the attribute caches the duplicated communicator that
   manages the sequential phase code below.
*/
static int Petsc_Seq_keyval = MPI_KEYVAL_INVALID;
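
/*
   A minimal sketch of the MPI attribute-caching pattern used below
   (illustration only; error checking omitted, and attr stands for a
   hypothetical pointer to the data being cached):

      static int keyval = MPI_KEYVAL_INVALID;
      void      *attr;
      int        flag;

      if (keyval == MPI_KEYVAL_INVALID) {
        MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&keyval,0);
      }
      MPI_Attr_put(comm,keyval,attr);           attach to the communicator
      MPI_Attr_get(comm,keyval,&attr,&flag);    flag is 0 if nothing cached
      MPI_Attr_delete(comm,keyval);             remove from the communicator
*/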
/*@C
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng - Number of processes in each group. This many processes are allowed
        to execute at the same time (usually 1).

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a
   way to force a section of code to be executed by the processes in
   rank order. Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed. Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd(). Also, note that some systems do
   not propagate I/O in any order to the controlling terminal (in other
   words, even if you flush the output, you may not get the data in the
   order that you want).
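
   For example, to print one line per process in rank order (this sketch
   assumes rank has been obtained with MPI_Comm_rank()):
.vb
      PetscSequentialPhaseBegin(comm, 1);
      printf("[%d] local contribution\n", rank);
      fflush(stdout);
      PetscSequentialPhaseEnd(comm, 1);
.ve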

.seealso: PetscSequentialPhaseEnd()

   Concepts: sequential stage

@*/
int PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  int      ierr,np;
  MPI_Comm local_comm,*addr_local_comm;

  ierr = MPI_Comm_size(comm,&np);CHKERRQ(ierr);
  if (np == 1) return(0);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Seq_keyval,0);CHKERRQ(ierr);
  }

  /* Duplicate the communicator and cache it as an attribute so that
     PetscSequentialPhaseEnd() can find it later */
  ierr = MPI_Comm_dup(comm,&local_comm);CHKERRQ(ierr);
  ierr = PetscMalloc(sizeof(MPI_Comm),&addr_local_comm);CHKERRQ(ierr);
  *addr_local_comm = local_comm;
  ierr = MPI_Attr_put(comm,Petsc_Seq_keyval,(void*)addr_local_comm);CHKERRQ(ierr);

  ierr = PetscSequentialPhaseBegin_Private(local_comm,ng);CHKERRQ(ierr);
  return(0);
}
/*@C
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng - Number of processes in each group. This many processes are allowed
        to execute at the same time (usually 1).

   Level: intermediate

   Notes:
   See PetscSequentialPhaseBegin() for more details.

.seealso: PetscSequentialPhaseBegin()

   Concepts: sequential stage

@*/
int PetscSequentialPhaseEnd(MPI_Comm comm,int ng)
{
  int      ierr,np,flag;
  MPI_Comm local_comm,*addr_local_comm;

  ierr = MPI_Comm_size(comm,&np);CHKERRQ(ierr);
  if (np == 1) return(0);

  /* Retrieve the communicator cached by PetscSequentialPhaseBegin() */
  ierr = MPI_Attr_get(comm,Petsc_Seq_keyval,(void **)&addr_local_comm,&flag);CHKERRQ(ierr);
  if (!flag) {
    SETERRQ(1,"Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  }
  local_comm = *addr_local_comm;

  ierr = PetscSequentialPhaseEnd_Private(local_comm,ng);CHKERRQ(ierr);

  /* Release the cached communicator and its attribute */
  ierr = PetscFree(addr_local_comm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&local_comm);CHKERRQ(ierr);
  ierr = MPI_Attr_delete(comm,Petsc_Seq_keyval);CHKERRQ(ierr);
  return(0);
}
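
/*
   A self-contained usage sketch (hypothetical test driver, not part of this
   file; assumes the usual PETSc startup and shutdown calls):

      #include "petsc.h"

      int main(int argc,char **argv)
      {
        int rank,ierr;

        ierr = PetscInitialize(&argc,&argv,0,0);CHKERRQ(ierr);
        MPI_Comm_rank(PETSC_COMM_WORLD,&rank);

        PetscSequentialPhaseBegin(PETSC_COMM_WORLD,1);
        printf("Hello from process %d\n",rank);
        fflush(stdout);
        PetscSequentialPhaseEnd(PETSC_COMM_WORLD,1);

        ierr = PetscFinalize();CHKERRQ(ierr);
        return 0;
      }
*/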