Actual source code: mmdense.c
1: #define PETSCMAT_DLL
3: /*
4: Support for the parallel dense matrix vector multiply
5: */
6: #include "src/mat/impls/dense/mpi/mpidense.h"
10: PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
11: {
12: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
14: IS from,to;
15: Vec gvec;
18: /* Create local vector that is used to scatter into */
19: VecCreateSeq(PETSC_COMM_SELF,mat->N,&mdn->lvec);
21: /* Create temporary index set for building scatter gather */
22: ISCreateStride(mat->comm,mat->N,0,1,&from);
23: ISCreateStride(PETSC_COMM_SELF,mat->N,0,1,&to);
25: /* Create temporary global vector to generate scatter context */
26: /* n = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */
28: VecCreateMPI(mat->comm,mdn->nvec,mat->N,&gvec);
30: /* Generate the scatter context */
31: VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);
32: PetscLogObjectParent(mat,mdn->Mvctx);
33: PetscLogObjectParent(mat,mdn->lvec);
34: PetscLogObjectParent(mat,from);
35: PetscLogObjectParent(mat,to);
36: PetscLogObjectParent(mat,gvec);
38: ISDestroy(to);
39: ISDestroy(from);
40: VecDestroy(gvec);
41: return(0);
42: }
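/*
   The routines below extract a set of sequential dense submatrices from the
   parallel dense matrix C.  MatGetSubMatrices_MPIDense() splits the request
   into stages so that roughly 20 MB of index data is handled per stage, and
   each stage calls MatGetSubMatrices_MPIDense_Local() on a chunk of the ISs.
*/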
44: EXTERN PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
47: PetscErrorCode MatGetSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
48: {
50: PetscInt nmax,nstages_local,nstages,i,pos,max_no;
53: /* Allocate memory to hold all the submatrices */
54: if (scall != MAT_REUSE_MATRIX) {
55: PetscMalloc((ismax+1)*sizeof(Mat),submat);
56: }
57: /* Determine the number of stages in which the submatrices are built */
58: nmax = 20*1000000 / (C->N * sizeof(PetscInt));
59: if (!nmax) nmax = 1;
60: nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
62: /* Make sure every processor loops through the nstages */
63: MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,C->comm);
66: for (i=0,pos=0; i<nstages; i++) {
67: if (pos+nmax <= ismax) max_no = nmax;
68: else if (pos == ismax) max_no = 0;
69: else max_no = ismax-pos;
70: MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
71: pos += max_no;
72: }
73: return(0);
74: }
75: /* -------------------------------------------------------------------------*/
78: PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
79: {
80: Mat_MPIDense *c = (Mat_MPIDense*)C->data;
81: Mat A = c->A;
82: Mat_SeqDense *a = (Mat_SeqDense*)A->data,*mat;
84: PetscMPIInt rank,size,tag0,tag1,idex,end,i;
85: PetscInt N = C->N,rstart = c->rstart,count;
86: PetscInt **irow,**icol,*nrow,*ncol,*w1,*w3,*w4,*rtable,start;
87: PetscInt **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
88: PetscInt nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
89: PetscInt is_no,jmax,*irow_i,**rmap,*rmap_i;
90: PetscInt len,ctr_j,*sbuf1_j,*rbuf1_i;
91: MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2;
92: MPI_Status *r_status1,*r_status2,*s_status1,*s_status2;
93: MPI_Comm comm;
94: PetscScalar **rbuf2,**sbuf2;
95: PetscTruth sorted;
98: comm = C->comm;
99: tag0 = C->tag;
100: size = c->size;
101: rank = c->rank;
102: m = C->M;
103:
104: /* Get some new tags to keep the communication clean */
105: PetscObjectGetNewTag((PetscObject)C,&tag1);
107: /* Check that the row and column indices are sorted */
108: for (i=0; i<ismax; i++) {
109: ISSorted(isrow[i],&sorted);
110: if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
111: ISSorted(iscol[i],&sorted);
112: if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
113: }
115: len = 2*ismax*(sizeof(PetscInt*)+sizeof(PetscInt)) + (m+1)*sizeof(PetscInt);
116: PetscMalloc(len,&irow);
117: icol = irow + ismax;
118: nrow = (PetscInt*)(icol + ismax);
119: ncol = nrow + ismax;
120: rtable = ncol + ismax;
122: for (i=0; i<ismax; i++) {
123: ISGetIndices(isrow[i],&irow[i]);
124: ISGetIndices(iscol[i],&icol[i]);
125: ISGetLocalSize(isrow[i],&nrow[i]);
126: ISGetLocalSize(iscol[i],&ncol[i]);
127: }
129: /* Create a lookup table for the mapping: row -> proc */
130: for (i=0,j=0; i<size; i++) {
131: jmax = c->rowners[i+1];
132: for (; j<jmax; j++) {
133: rtable[j] = i;
134: }
135: }
137: /* Evaluate the communication: to whom each message goes, its length, and the buffer space
138: required. Based on this, buffers are allocated and data is copied into them. */
139: PetscMalloc(size*4*sizeof(PetscInt),&w1); /* mesg size */
140: w3 = w1 + 2*size; /* number of ISs that need to be sent to proc i */
141: w4 = w3 + size; /* temp work space used in determining w1, w3 */
142: PetscMemzero(w1,size*3*sizeof(PetscInt)); /* initialize work vector*/
143: for (i=0; i<ismax; i++) {
144: PetscMemzero(w4,size*sizeof(PetscInt)); /* initialize work vector*/
145: jmax = nrow[i];
146: irow_i = irow[i];
147: for (j=0; j<jmax; j++) {
148: row = irow_i[j];
149: proc = rtable[row];
150: w4[proc]++;
151: }
152: for (j=0; j<size; j++) {
153: if (w4[j]) { w1[2*j] += w4[j]; w3[j]++;}
154: }
155: }
156:
157: nrqs = 0; /* no of outgoing messages */
158: msz = 0; /* total mesg length (for all procs) */
159: w1[2*rank] = 0; /* no mesg sent to self */
160: w3[rank] = 0;
161: for (i=0; i<size; i++) {
162: if (w1[2*i]) { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
163: }
164: PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa); /* (proc array) */
165: for (i=0,j=0; i<size; i++) {
166: if (w1[2*i]) { pa[j] = i; j++; }
167: }
169: /* Each message has a header of length 1 + 2*(number of ISs), followed by the data */
170: for (i=0; i<nrqs; i++) {
171: j = pa[i];
172: w1[2*j] += w1[2*j+1] + 2* w3[j];
173: msz += w1[2*j];
174: }
175: /* Do a global reduction to determine how many messages to expect*/
176: PetscMaxSum(comm,w1,&bsz,&nrqr);
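/* After PetscMaxSum(): bsz is the largest request message this process will
   receive, and nrqr is the number of request messages it will receive */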
178: /* Allocate memory for the recv buffers (possibly none are needed if nrqr == 0) */
179: len = (nrqr+1)*sizeof(PetscInt*) + nrqr*bsz*sizeof(PetscInt);
180: PetscMalloc(len,&rbuf1);
181: rbuf1[0] = (PetscInt*)(rbuf1 + nrqr);
182: for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;
183:
184: /* Post the receives */
185: PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);
186: for (i=0; i<nrqr; ++i) {
187: MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
188: }
190: /* Allocate Memory for outgoing messages */
191: len = 2*size*sizeof(PetscInt*) + 2*msz*sizeof(PetscInt)+ size*sizeof(PetscInt);
192: PetscMalloc(len,&sbuf1);
193: ptr = sbuf1 + size; /* Pointers to the data in outgoing buffers */
194: PetscMemzero(sbuf1,2*size*sizeof(PetscInt*));
195: /* allocate memory for outgoing data + buf to receive the first reply */
196: tmp = (PetscInt*)(ptr + size);
197: ctr = tmp + 2*msz;
199: {
200: PetscInt *iptr = tmp,ict = 0;
201: for (i=0; i<nrqs; i++) {
202: j = pa[i];
203: iptr += ict;
204: sbuf1[j] = iptr;
205: ict = w1[2*j];
206: }
207: }
209: /* Form the outgoing messages */
210: /* Initialize the header space */
211: for (i=0; i<nrqs; i++) {
212: j = pa[i];
213: sbuf1[j][0] = 0;
214: PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));
215: ptr[j] = sbuf1[j] + 2*w3[j] + 1;
216: }
217:
218: /* Parse the isrow and copy data into outbuf */
219: for (i=0; i<ismax; i++) {
220: PetscMemzero(ctr,size*sizeof(PetscInt));
221: irow_i = irow[i];
222: jmax = nrow[i];
223: for (j=0; j<jmax; j++) { /* parse the indices of each IS */
224: row = irow_i[j];
225: proc = rtable[row];
226: if (proc != rank) { /* copy to the outgoing buf*/
227: ctr[proc]++;
228: *ptr[proc] = row;
229: ptr[proc]++;
230: }
231: }
232: /* Update the headers for the current IS */
233: for (j=0; j<size; j++) { /* Can Optimise this loop too */
234: if ((ctr_j = ctr[j])) {
235: sbuf1_j = sbuf1[j];
236: k = ++sbuf1_j[0];
237: sbuf1_j[2*k] = ctr_j;
238: sbuf1_j[2*k-1] = i;
239: }
240: }
241: }
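/* Each outgoing request message now has the layout
      [0]                          number of ISs with rows on the destination
      [2*k-1],[2*k], k=1..[0]      IS number and row count of the k-th such IS
      remaining entries            the requested global row numbers, grouped by IS */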
243: /* Now post the sends */
244: PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
245: for (i=0; i<nrqs; ++i) {
246: j = pa[i];
247: MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);
248: }
250: /* Post receives to capture the row data from the other procs */
251: PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
252: PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);
253: for (i=0; i<nrqs; i++) {
254: j = pa[i];
255: count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
256: PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);
257: MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
258: }
260: /* Receive the messages (row numbers), then pack and send off the row values
261: to the requesting processors */
263: PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
264: PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
265: PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);
266:
267: {
268: PetscScalar *sbuf2_i,*v_start;
269: PetscInt s_proc;
270: for (i=0; i<nrqr; ++i) {
271: MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
272: s_proc = r_status1[i].MPI_SOURCE; /* sending processor */
273: rbuf1_i = rbuf1[idex]; /* Actual message from s_proc */
274: /* number of requested rows = end - start; start is the offset of the first row
275: number in the buffer, end is the total message length obtained below */
276: start = 2*rbuf1_i[0] + 1;
277: MPI_Get_count(r_status1+i,MPIU_INT,&end);
278: /* allocate memory sufficient to hold all the row values */
279: PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[idex]);
280: sbuf2_i = sbuf2[idex];
281: /* Now pack the data */
282: for (j=start; j<end; j++) {
283: row = rbuf1_i[j] - rstart;
284: v_start = a->v + row;
285: for (k=0; k<N; k++) {
286: sbuf2_i[0] = v_start[0];
287: sbuf2_i++; v_start += C->m;
288: }
289: }
290: /* Now send off the data */
291: MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
292: }
293: }
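/* Note: the local dense matrix is stored column-major with leading dimension
   C->m, so one full row of length N is gathered above by stepping a->v + row,
   a->v + row + C->m, ... across the columns */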
294: /* End Send-Recv of IS + row_numbers */
295: PetscFree(r_status1);
296: PetscFree(r_waits1);
297: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
298: if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
299: PetscFree(s_status1);
300: PetscFree(s_waits1);
302: /* Create the submatrices */
303: if (scall == MAT_REUSE_MATRIX) {
304: for (i=0; i<ismax; i++) {
305: mat = (Mat_SeqDense *)(submats[i]->data);
306: if ((submats[i]->m != nrow[i]) || (submats[i]->n != ncol[i])) {
307: SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
308: }
309: PetscMemzero(mat->v,submats[i]->m*submats[i]->n*sizeof(PetscScalar));
310: submats[i]->factor = C->factor;
311: }
312: } else {
313: for (i=0; i<ismax; i++) {
314: MatCreate(PETSC_COMM_SELF,submats+i);
315: MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);
316: MatSetType(submats[i],A->type_name);
317: MatSeqDenseSetPreallocation(submats[i],PETSC_NULL);
318: }
319: }
320:
321: /* Assemble the matrices */
322: {
323: PetscInt col;
324: PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;
325:
326: for (i=0; i<ismax; i++) {
327: mat = (Mat_SeqDense*)submats[i]->data;
328: mat_v = a->v;
329: imat_v = mat->v;
330: irow_i = irow[i];
331: m = nrow[i];
332: for (j=0; j<m; j++) {
333: row = irow_i[j] ;
334: proc = rtable[row];
335: if (proc == rank) {
336: row = row - rstart;
337: mat_vi = mat_v + row;
338: imat_vi = imat_v + j;
339: for (k=0; k<ncol[i]; k++) {
340: col = icol[i][k];
341: imat_vi[k*m] = mat_vi[col*C->m];
342: }
343: }
344: }
345: }
346: }
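/* Locally owned rows were copied directly above: both source and submatrix are
   column-major, so column col of the source row lives at mat_vi[col*C->m] and
   column k of the submatrix row lives at imat_vi[k*m] */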
348: /* Create the row map, which maps c->row to submat->row for each submat */
349: /* This is a very expensive operation with respect to memory usage */
350: len = (1+ismax)*sizeof(PetscInt*)+ ismax*C->M*sizeof(PetscInt);
351: PetscMalloc(len,&rmap);
352: rmap[0] = (PetscInt*)(rmap + ismax);
353: PetscMemzero(rmap[0],ismax*C->M*sizeof(PetscInt));
354: for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->M;}
355: for (i=0; i<ismax; i++) {
356: rmap_i = rmap[i];
357: irow_i = irow[i];
358: jmax = nrow[i];
359: for (j=0; j<jmax; j++) {
360: rmap_i[irow_i[j]] = j;
361: }
362: }
363:
364: /* Now receive the row values and assemble the rest of each submatrix */
365: PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);
367: {
368: PetscInt is_max,tmp1,col,*sbuf1_i,is_sz;
369: PetscScalar *rbuf2_i,*imat_v,*imat_vi;
370:
371: for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
372: MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
373: /* Now dig out the corresponding sbuf1, which contains the IS data structure */
374: sbuf1_i = sbuf1[pa[i]];
375: is_max = sbuf1_i[0];
376: ct1 = 2*is_max+1;
377: rbuf2_i = rbuf2[i];
378: for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
379: is_no = sbuf1_i[2*j-1];
380: is_sz = sbuf1_i[2*j];
381: mat = (Mat_SeqDense*)submats[is_no]->data;
382: imat_v = mat->v;
383: rmap_i = rmap[is_no];
384: m = nrow[is_no];
385: for (k=0; k<is_sz; k++,rbuf2_i+=N) { /* For each row */
386: row = sbuf1_i[ct1]; ct1++;
387: row = rmap_i[row];
388: imat_vi = imat_v + row;
389: for (l=0; l<ncol[is_no]; l++) { /* For each col */
390: col = icol[is_no][l];
391: imat_vi[l*m] = rbuf2_i[col];
392: }
393: }
394: }
395: }
396: }
397: /* End Send-Recv of row_values */
398: PetscFree(r_status2);
399: PetscFree(r_waits2);
400: PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);
401: if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
402: PetscFree(s_status2);
403: PetscFree(s_waits2);
405: /* Restore the indices */
406: for (i=0; i<ismax; i++) {
407: ISRestoreIndices(isrow[i],irow+i);
408: ISRestoreIndices(iscol[i],icol+i);
409: }
411: /* Destroy allocated memory */
412: PetscFree(irow);
413: PetscFree(w1);
414: PetscFree(pa);
417: for (i=0; i<nrqs; ++i) {
418: PetscFree(rbuf2[i]);
419: }
420: PetscFree(rbuf2);
421: PetscFree(sbuf1);
422: PetscFree(rbuf1);
424: for (i=0; i<nrqr; ++i) {
425: PetscFree(sbuf2[i]);
426: }
428: PetscFree(sbuf2);
429: PetscFree(rmap);
431: for (i=0; i<ismax; i++) {
432: MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
433: MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
434: }
436: return(0);
437: }
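/*
   Example (a minimal sketch, not part of the original file): applications normally
   reach MatGetSubMatrices_MPIDense() through the generic MatGetSubMatrices()
   interface; note that this implementation requires sorted row and column ISs.

       IS  rows,cols;
       Mat *submat;

       ISCreateStride(PETSC_COMM_SELF,5,0,1,&rows);     (sorted by construction)
       ISCreateStride(PETSC_COMM_SELF,5,0,1,&cols);
       MatGetSubMatrices(C,1,&rows,&cols,MAT_INITIAL_MATRIX,&submat);
       ... use submat[0], a sequential dense matrix, on each process ...
       MatDestroyMatrices(1,&submat);
       ISDestroy(rows); ISDestroy(cols);
*/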