Actual source code: mmdense.c
/*$Id: mmdense.c,v 1.40 2001/09/07 20:09:22 bsmith Exp $*/

/*
   Support for the parallel dense matrix-vector multiply
*/
#include "src/mat/impls/dense/mpi/mpidense.h"
#include "src/vec/vecimpl.h"
int MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
  int          ierr;
  IS           from,to;
  Vec          gvec;

  /* Create local vector that is used to scatter into */
  VecCreateSeq(PETSC_COMM_SELF,mat->N,&mdn->lvec);

  /* Create temporary index sets for building the scatter-gather */
  ISCreateStride(mat->comm,mat->N,0,1,&from);
  ISCreateStride(PETSC_COMM_SELF,mat->N,0,1,&to);

  /* Create temporary global vector to generate scatter context */
  /* n = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  VecCreateMPI(mat->comm,mdn->nvec,mat->N,&gvec);

  /* Generate the scatter context */
  VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);
  PetscLogObjectParent(mat,mdn->Mvctx);
  PetscLogObjectParent(mat,mdn->lvec);
  PetscLogObjectParent(mat,from);
  PetscLogObjectParent(mat,to);
  PetscLogObjectParent(mat,gvec);

  ISDestroy(to);
  ISDestroy(from);
  VecDestroy(gvec);
  return(0);
}
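
/*
   Illustrative sketch (not part of the original file): the scatter context and the local
   work vector created above are what the MPIDENSE matrix-vector product typically uses to
   gather the entire input vector onto every process.  Assuming xx and yy denote the
   parallel input and output vectors, the multiply proceeds roughly as

      VecScatterBegin(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
      VecScatterEnd(xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD,mdn->Mvctx);
      MatMult(mdn->A,mdn->lvec,yy);
*/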
EXTERN int MatGetSubMatrices_MPIDense_Local(Mat,int,IS*,IS*,MatReuse,Mat*);

int MatGetSubMatrices_MPIDense(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat **submat)
{
  int nmax,nstages_local,nstages,i,pos,max_no,ierr;

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    PetscMalloc((ismax+1)*sizeof(Mat),submat);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (C->N * sizeof(int));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the nstages */
  MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
    pos += max_no;
  }
  return(0);
}
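
/*
   Illustrative sketch (not part of the original file): user code normally reaches the
   staged routine above through the generic MatGetSubMatrices() interface.  Assuming A is
   an MPIDENSE matrix, n is the number of index sets, and isrow/iscol are index sets built
   by the caller, an initial extraction followed by a reuse of the same submatrices looks
   roughly like

      Mat *submat;
      MatGetSubMatrices(A,n,isrow,iscol,MAT_INITIAL_MATRIX,&submat);
      ...  change the numerical values of A  ...
      MatGetSubMatrices(A,n,isrow,iscol,MAT_REUSE_MATRIX,&submat);
*/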
/* -------------------------------------------------------------------------*/
int MatGetSubMatrices_MPIDense_Local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats)
{
  Mat_MPIDense *c = (Mat_MPIDense*)C->data;
  Mat          A = c->A;
  Mat_SeqDense *a = (Mat_SeqDense*)A->data,*mat;
  int          N = C->N,rstart = c->rstart,count;
  int          **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end,size;
  int          **sbuf1,rank,m,i,j,k,l,ct1,ierr,**rbuf1,row,proc;
  int          nrqs,msz,**ptr,index,*ctr,*pa,*tmp,bsz,nrqr;
  int          is_no,jmax,*irow_i,**rmap,*rmap_i;
  int          len,ctr_j,*sbuf1_j,*rbuf1_i;
  int          tag0,tag1;
  MPI_Request  *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status   *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm     comm;
  PetscScalar  **rbuf2,**sbuf2;

  comm = C->comm;
  tag0 = C->tag;
  size = c->size;
  rank = c->rank;
  m    = C->M;

  /* Get some new tags to keep the communication clean */
  PetscObjectGetNewTag((PetscObject)C,&tag1);

  /* Check that the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ISSorted(isrow[i],(PetscTruth*)&j);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ISSorted(iscol[i],(PetscTruth*)&j);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }
  len    = 2*ismax*(sizeof(int *)+sizeof(int)) + (m+1)*sizeof(int);
  ierr   = PetscMalloc(len,&irow);
  icol   = irow + ismax;
  nrow   = (int*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ISGetIndices(isrow[i],&irow[i]);
    ISGetIndices(iscol[i],&icol[i]);
    ISGetLocalSize(isrow[i],&nrow[i]);
    ISGetLocalSize(iscol[i],&ncol[i]);
  }

  /* Create hash table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = c->rowners[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }
  /* Evaluate communication - which procs get a message, the length of each message,
     and the buffer space required.  Based on this, buffers are allocated and data
     copied into them. */
  ierr = PetscMalloc(size*4*sizeof(int),&w1);  /* mesg size */
  w2   = w1 + size;  /* if w2[i] is marked, then a message goes to proc i */
  w3   = w2 + size;  /* no of IS that need to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  PetscMemzero(w1,size*3*sizeof(int));  /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(int));  /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length (for all procs) */
  w1[rank] = 0;  /* no mesg sent to self */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++;}  /* there exists a message to proc i */
  }
  PetscMalloc((nrqs+1)*sizeof(int),&pa);  /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }
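
  /* Sketch of the message layout implied by the header arithmetic above (the buffers
     themselves are filled in further below):
        sbuf1[j][0]                    - number of index sets that have rows owned by proc j
        sbuf1[j][2*k-1], sbuf1[j][2*k] - IS number and row count for the k-th such index set
        remaining w1[j]-(2*sbuf1[j][0]+1) entries - the requested global row numbers */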
  /* Do a global reduction to determine how many messages to expect */
  {
    int *rw1;
    ierr = PetscMalloc(2*size*sizeof(int),&rw1);
    ierr = MPI_Allreduce(w1,rw1,2*size,MPI_INT,PetscMaxSum_Op,comm);
    bsz  = rw1[rank];
    nrqr = rw1[size+rank];
    ierr = PetscFree(rw1);
  }
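  /* Note: judging from how bsz and nrqr are used below, the PetscMaxSum_Op reduction yields,
     for this rank, the length of the longest incoming message (bsz, reduced over the first
     size entries, i.e. w1) and the number of incoming messages (nrqr, reduced over the
     second size entries, i.e. w2). */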
  /* Allocate memory for recv buffers. Prob none if nrqr = 0 ???? */
  len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
  ierr     = PetscMalloc(len,&rbuf1);
  rbuf1[0] = (int*)(rbuf1 + nrqr);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);
  for (i=0; i<nrqr; ++i) {
    MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
  }

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  ierr = PetscMalloc(len,&sbuf1);
  ptr  = sbuf1 + size;  /* pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(int*));
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp  = (int*)(ptr + size);
  ctr  = tmp + 2*msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    PetscMemzero(ctr,size*sizeof(int));
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) {  /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) {  /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
  for (i=0; i<nrqs; ++i) {
    j = pa[i];
    MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);
  }
  /* Post receives to capture the row data from other procs */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[j] - (2*sbuf1[j][0] + 1))*N;
    ierr  = PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);
    ierr  = MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
  }
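  /* Note on the receive size above: w1[j] is the full length of the request sent to proc j
     and 2*sbuf1[j][0]+1 is its header, so the difference is the number of rows requested
     from proc j; each of those rows comes back as N scalars (one full row of C). */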
  /* Receive the messages (row numbers) and then pack and send off the row values
     to the correct processors */

  PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
  PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
  PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);

  {
    PetscScalar *sbuf2_i,*v_start;
    int         s_proc;
    for (i=0; i<nrqr; ++i) {
      MPI_Waitany(nrqr,r_waits1,&index,r_status1+i);
      s_proc  = r_status1[i].MPI_SOURCE;  /* sending processor */
      rbuf1_i = rbuf1[index];             /* actual message from s_proc */
      /* no of rows = end - start; start is a 0-based index into the buffer,
         while end is the (1-based) length of the buffer */
      start = 2*rbuf1_i[0] + 1;
      ierr  = MPI_Get_count(r_status1+i,MPI_INT,&end);
      /* allocate memory sufficient to hold all the row values */
      PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[index]);
      sbuf2_i = sbuf2[index];
      /* Now pack the data */
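      /* The local dense array a->v is stored column-major with a column stride of C->m
         (the local row count), so successive entries of one row are C->m apart. */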
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++; v_start += C->m;
        }
      }
      /* Now send off the data */
      MPI_Isend(sbuf2[index],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
    }
  }
  /* End Send-Recv of IS + row_numbers */
  PetscFree(r_status1);
  PetscFree(r_waits1);
  PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
  ierr = MPI_Waitall(nrqs,s_waits1,s_status1);
  PetscFree(s_status1);
  PetscFree(s_waits1);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->m != nrow[i]) || (submats[i]->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      }
      PetscMemzero(mat->v,submats[i]->m*submats[i]->n*sizeof(PetscScalar));
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      MatCreateSeqDense(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_NULL,submats+i);
    }
  }

  /* Assemble the matrices */
  {
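    /* This block copies the locally owned rows straight from C into each submatrix.
       Both dense arrays are column-major: entry (row,col) of C's local block sits at
       mat_v[row + col*C->m], and entry (j,k) of submatrix i sits at imat_v[j + k*nrow[i]]. */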
    int         col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->m];
          }
        }
      }
    }
  }

  /* Create row map.  This maps c->row to submat->row for each submat. */
  /* This is a very expensive operation wrt memory usage. */
  len     = (1+ismax)*sizeof(int*) + ismax*C->M*sizeof(int);
  ierr    = PetscMalloc(len,&rmap);
  rmap[0] = (int*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*C->M*sizeof(int));
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->M;}
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
  PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);

  {
    int         is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) {  /* for each message */
      MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) {  /* for each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* for each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) {  /* for each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscFree(r_status2);
  PetscFree(r_waits2);
  PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);
  MPI_Waitall(nrqr,s_waits2,s_status2);
  PetscFree(s_status2);
  PetscFree(s_waits2);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ISRestoreIndices(isrow[i],irow+i);
    ISRestoreIndices(iscol[i],icol+i);
  }

  /* Destroy allocated memory */
  PetscFree(irow);
  PetscFree(w1);
  PetscFree(pa);

  for (i=0; i<nrqs; ++i) {
    PetscFree(rbuf2[i]);
  }
  PetscFree(rbuf2);
  PetscFree(sbuf1);
  PetscFree(rbuf1);

  for (i=0; i<nrqr; ++i) {
    PetscFree(sbuf2[i]);
  }
  PetscFree(sbuf2);
  PetscFree(rmap);

  for (i=0; i<ismax; i++) {
    MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
  }
  return(0);
}