Actual source code: mpisbaij.c
1: #define PETSCMAT_DLL
3: #include "src/mat/impls/baij/mpi/mpibaij.h"
4: #include "mpisbaij.h"
5: #include "src/mat/impls/sbaij/seq/sbaij.h"
7: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ(Mat);
8: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ_2comm(Mat);
9: EXTERN PetscErrorCode DisAssemble_MPISBAIJ(Mat);
10: EXTERN PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat,PetscInt,IS[],PetscInt);
11: EXTERN PetscErrorCode MatGetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
12: EXTERN PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
13: EXTERN PetscErrorCode MatSetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt [],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
14: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
15: EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
16: EXTERN PetscErrorCode MatGetRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
17: EXTERN PetscErrorCode MatRestoreRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
18: EXTERN PetscErrorCode MatPrintHelp_SeqSBAIJ(Mat);
19: EXTERN PetscErrorCode MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
20: EXTERN PetscErrorCode MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
21: EXTERN PetscErrorCode MatGetRowMax_MPISBAIJ(Mat,Vec);
22: EXTERN PetscErrorCode MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,PetscInt,PetscInt,Vec);
24: /* UGLY, ugly, ugly
25: When MatScalar == PetscScalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
26: not exist. Otherwise ..._MatScalar() takes matrix elements in single precision and
27: inserts them into the single-precision data structures. The function MatSetValuesBlocked_MPISBAIJ()
28: converts the entries to single precision and then calls ..._MatScalar() to put them
29: into the single-precision data structures.
30: */
31: #if defined(PETSC_USE_MAT_SINGLE)
32: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
33: EXTERN PetscErrorCode MatSetValues_MPISBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
34: EXTERN PetscErrorCode MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
35: EXTERN PetscErrorCode MatSetValues_MPISBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
36: EXTERN PetscErrorCode MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
37: #else
38: #define MatSetValuesBlocked_SeqSBAIJ_MatScalar MatSetValuesBlocked_SeqSBAIJ
39: #define MatSetValues_MPISBAIJ_MatScalar MatSetValues_MPISBAIJ
40: #define MatSetValuesBlocked_MPISBAIJ_MatScalar MatSetValuesBlocked_MPISBAIJ
41: #define MatSetValues_MPISBAIJ_HT_MatScalar MatSetValues_MPISBAIJ_HT
42: #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar MatSetValuesBlocked_MPISBAIJ_HT
43: #endif
48: PetscErrorCode PETSCMAT_DLLEXPORT MatStoreValues_MPISBAIJ(Mat mat)
49: {
50: Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
54: MatStoreValues(aij->A);
55: MatStoreValues(aij->B);
56: return(0);
57: }
63: PetscErrorCode PETSCMAT_DLLEXPORT MatRetrieveValues_MPISBAIJ(Mat mat)
64: {
65: Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
69: MatRetrieveValues(aij->A);
70: MatRetrieveValues(aij->B);
71: return(0);
72: }
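/*
   A usage sketch for the pair above (illustrative only; the matrix "mat" and the
   calls shown are hypothetical, not part of this file): MatStoreValues()/
   MatRetrieveValues() let an application save and later restore the numerical
   values of a matrix whose nonzero pattern is frozen, e.g. when rebuilding a
   Jacobian with the same structure.
*/
#if 0
static PetscErrorCode ExampleStoreRetrieve(Mat mat)
{
  MatSetOption(mat,MAT_NO_NEW_NONZERO_LOCATIONS); /* the pattern must be frozen first */
  MatStoreValues(mat);                            /* saves the values of both the A and B parts */
  /* ... overwrite values with MatSetValues() + MatAssemblyBegin/End() ... */
  MatRetrieveValues(mat);                         /* restores the saved values */
  return(0);
}
#endif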
76: #define CHUNKSIZE 10
78: #define MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
79: { \
80: \
81: brow = row/bs; \
82: rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
83: rmax = aimax[brow]; nrow = ailen[brow]; \
84: bcol = col/bs; \
85: ridx = row % bs; cidx = col % bs; \
86: low = 0; high = nrow; \
87: while (high-low > 3) { \
88: t = (low+high)/2; \
89: if (rp[t] > bcol) high = t; \
90: else low = t; \
91: } \
92: for (_i=low; _i<high; _i++) { \
93: if (rp[_i] > bcol) break; \
94: if (rp[_i] == bcol) { \
95: bap = ap + bs2*_i + bs*cidx + ridx; \
96: if (addv == ADD_VALUES) *bap += value; \
97: else *bap = value; \
98: goto a_noinsert; \
99: } \
100: } \
101: if (a->nonew == 1) goto a_noinsert; \
102: if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
103: MatSeqXAIJReallocateAIJ(a,bs2,nrow,brow,bcol,rmax,aa,ai,aj,a->mbs,rp,ap,aimax,a->nonew); \
104: N = nrow++ - 1; \
105: /* shift up all the later entries in this row */ \
106: for (ii=N; ii>=_i; ii--) { \
107: rp[ii+1] = rp[ii]; \
108: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
109: } \
110: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); } \
111: rp[_i] = bcol; \
112: ap[bs2*_i + bs*cidx + ridx] = value; \
113: a_noinsert:; \
114: ailen[brow] = nrow; \
115: }
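/*
   The insertion macros above and below locate a block column inside a sorted
   block row with a short binary search that shrinks [low,high) to at most 3
   candidates, then finish with a linear scan. A standalone sketch of just the
   search (hypothetical helper, not used by this file):
*/
#if 0
static PetscInt ExampleFindBlockCol(const PetscInt *rp,PetscInt nrow,PetscInt bcol)
{
  PetscInt low = 0,high = nrow,t,i;
  while (high-low > 3) {
    t = (low+high)/2;
    if (rp[t] > bcol) high = t;
    else              low  = t;
  }
  for (i=low; i<high; i++) {
    if (rp[i] == bcol) return i; /* found: index of the existing block */
    if (rp[i] > bcol)  break;    /* the row is sorted, so bcol is absent */
  }
  return -1;                     /* caller must insert a new block here */
}
#endif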
116: #ifndef MatSetValues_SeqSBAIJ_B_Private
117: #define MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
118: { \
119: brow = row/bs; \
120: rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
121: rmax = bimax[brow]; nrow = bilen[brow]; \
122: bcol = col/bs; \
123: ridx = row % bs; cidx = col % bs; \
124: low = 0; high = nrow; \
125: while (high-low > 3) { \
126: t = (low+high)/2; \
127: if (rp[t] > bcol) high = t; \
128: else low = t; \
129: } \
130: for (_i=low; _i<high; _i++) { \
131: if (rp[_i] > bcol) break; \
132: if (rp[_i] == bcol) { \
133: bap = ap + bs2*_i + bs*cidx + ridx; \
134: if (addv == ADD_VALUES) *bap += value; \
135: else *bap = value; \
136: goto b_noinsert; \
137: } \
138: } \
139: if (b->nonew == 1) goto b_noinsert; \
140: if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
141: MatSeqXAIJReallocateAIJ(b,bs2,nrow,brow,bcol,rmax,ba,bi,bj,b->mbs,rp,ap,bimax,b->nonew); \
142: N = nrow++ - 1; \
143: /* shift up all the later entries in this row */ \
144: for (ii=N; ii>=_i; ii--) { \
145: rp[ii+1] = rp[ii]; \
146: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
147: } \
148: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));} \
149: rp[_i] = bcol; \
150: ap[bs2*_i + bs*cidx + ridx] = value; \
151: b_noinsert:; \
152: bilen[brow] = nrow; \
153: }
154: #endif
156: #if defined(PETSC_USE_MAT_SINGLE)
159: PetscErrorCode MatSetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
160: {
161: Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
163: PetscInt i,N = m*n;
164: MatScalar *vsingle;
167: if (N > b->setvalueslen) {
168: if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
169: PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
170: b->setvalueslen = N;
171: }
172: vsingle = b->setvaluescopy;
174: for (i=0; i<N; i++) {
175: vsingle[i] = v[i];
176: }
177: MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
178: return(0);
179: }
183: PetscErrorCode MatSetValuesBlocked_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
184: {
185: Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
187: PetscInt i,N = m*n*b->bs2;
188: MatScalar *vsingle;
191: if (N > b->setvalueslen) {
192: if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
193: PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
194: b->setvalueslen = N;
195: }
196: vsingle = b->setvaluescopy;
197: for (i=0; i<N; i++) {
198: vsingle[i] = v[i];
199: }
200: MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
201: return(0);
202: }
203: #endif
205: /* Only add/insert a(i,j) with i<=j (blocks).
206: Any a(i,j) with i>j input by the user is ignored.
207: */
210: PetscErrorCode MatSetValues_MPISBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
211: {
212: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
213: MatScalar value;
214: PetscTruth roworiented = baij->roworiented;
216: PetscInt i,j,row,col;
217: PetscInt rstart_orig=baij->rstart_bs;
218: PetscInt rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
219: PetscInt cend_orig=baij->cend_bs,bs=mat->bs;
221: /* Some Variables required in the macro */
222: Mat A = baij->A;
223: Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
224: PetscInt *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
225: MatScalar *aa=a->a;
227: Mat B = baij->B;
228: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
229: PetscInt *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
230: MatScalar *ba=b->a;
232: PetscInt *rp,ii,nrow,_i,rmax,N,brow,bcol;
233: PetscInt low,high,t,ridx,cidx,bs2=a->bs2;
234: MatScalar *ap,*bap;
236: /* for stash */
237: PetscInt n_loc, *in_loc=0;
238: MatScalar *v_loc=0;
242: if(!baij->donotstash){
243: PetscMalloc(n*sizeof(PetscInt),&in_loc);
244: PetscMalloc(n*sizeof(MatScalar),&v_loc);
245: }
247: for (i=0; i<m; i++) {
248: if (im[i] < 0) continue;
249: #if defined(PETSC_USE_DEBUG)
250: if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->M-1);
251: #endif
252: if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
253: row = im[i] - rstart_orig; /* local row index */
254: for (j=0; j<n; j++) {
255: if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
256: if (in[j] >= cstart_orig && in[j] < cend_orig){ /* diag entry (A) */
257: col = in[j] - cstart_orig; /* local col index */
258: brow = row/bs; bcol = col/bs;
259: if (brow > bcol) continue; /* ignore lower triangular blocks of A */
260: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
261: MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
262: /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
263: } else if (in[j] < 0) continue;
264: #if defined(PETSC_USE_DEBUG)
265: else if (in[j] >= mat->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->N-1);}
266: #endif
267: else { /* off-diag entry (B) */
268: if (mat->was_assembled) {
269: if (!baij->colmap) {
270: CreateColmap_MPIBAIJ_Private(mat);
271: }
272: #if defined (PETSC_USE_CTABLE)
273: PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
274: col = col - 1;
275: #else
276: col = baij->colmap[in[j]/bs] - 1;
277: #endif
278: if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
279: DisAssemble_MPISBAIJ(mat);
280: col = in[j];
281: /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
282: B = baij->B;
283: b = (Mat_SeqBAIJ*)(B)->data;
284: bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
285: ba=b->a;
286: } else col += in[j]%bs;
287: } else col = in[j];
288: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
289: MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
290: /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
291: }
292: }
293: } else { /* off processor entry */
294: if (!baij->donotstash) {
295: n_loc = 0;
296: for (j=0; j<n; j++){
297: if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
298: in_loc[n_loc] = in[j];
299: if (roworiented) {
300: v_loc[n_loc] = v[i*n+j];
301: } else {
302: v_loc[n_loc] = v[j*m+i];
303: }
304: n_loc++;
305: }
306: MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);
307: }
308: }
309: }
311: if(!baij->donotstash){
312: PetscFree(in_loc);
313: PetscFree(v_loc);
314: }
315: return(0);
316: }
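/*
   A usage sketch for the routine above (hypothetical matrix "mat" of type
   MATMPISBAIJ with bs=1, already preallocated, and i+1 assumed to be a valid
   column): only the upper triangle needs to be supplied; lower triangular
   entries are silently dropped by the i<=j filter in the loops above.
*/
#if 0
static PetscErrorCode ExampleSetEntries(Mat mat,PetscInt i)
{
  PetscInt    cols[2];
  PetscScalar vals[2] = {2.0,-1.0};
  cols[0] = i; cols[1] = i+1;  /* diagonal and superdiagonal entries only */
  MatSetValues(mat,1,&i,2,cols,vals,INSERT_VALUES);
  return(0);
}
#endif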
320: PetscErrorCode MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
321: {
322: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
323: const MatScalar *value;
324: MatScalar *barray=baij->barray;
325: PetscTruth roworiented = baij->roworiented;
326: PetscErrorCode ierr;
327: PetscInt i,j,ii,jj,row,col,rstart=baij->rstart;
328: PetscInt rend=baij->rend,cstart=baij->cstart,stepval;
329: PetscInt cend=baij->cend,bs=mat->bs,bs2=baij->bs2;
332: if(!barray) {
333: PetscMalloc(bs2*sizeof(MatScalar),&barray);
334: baij->barray = barray;
335: }
337: if (roworiented) {
338: stepval = (n-1)*bs;
339: } else {
340: stepval = (m-1)*bs;
341: }
342: for (i=0; i<m; i++) {
343: if (im[i] < 0) continue;
344: #if defined(PETSC_USE_DEBUG)
345: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
346: #endif
347: if (im[i] >= rstart && im[i] < rend) {
348: row = im[i] - rstart;
349: for (j=0; j<n; j++) {
350: /* If n == 1 (row-oriented) or m == 1 (column-oriented) then a copy is not required */
351: if ((roworiented) && (n == 1)) {
352: barray = (MatScalar*) v + i*bs2;
353: } else if((!roworiented) && (m == 1)) {
354: barray = (MatScalar*) v + j*bs2;
355: } else { /* Here a copy is required */
356: if (roworiented) {
357: value = v + i*(stepval+bs)*bs + j*bs;
358: } else {
359: value = v + j*(stepval+bs)*bs + i*bs;
360: }
361: for (ii=0; ii<bs; ii++,value+=stepval) {
362: for (jj=0; jj<bs; jj++) {
363: *barray++ = *value++;
364: }
365: }
366: barray -=bs2;
367: }
368:
369: if (in[j] >= cstart && in[j] < cend){
370: col = in[j] - cstart;
371: MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
372: }
373: else if (in[j] < 0) continue;
374: #if defined(PETSC_USE_DEBUG)
375: else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
376: #endif
377: else {
378: if (mat->was_assembled) {
379: if (!baij->colmap) {
380: CreateColmap_MPIBAIJ_Private(mat);
381: }
383: #if defined(PETSC_USE_DEBUG)
384: #if defined (PETSC_USE_CTABLE)
385: { PetscInt data;
386: PetscTableFind(baij->colmap,in[j]+1,&data);
387: if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
388: }
389: #else
390: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
391: #endif
392: #endif
393: #if defined (PETSC_USE_CTABLE)
394: PetscTableFind(baij->colmap,in[j]+1,&col);
395: col = (col - 1)/bs;
396: #else
397: col = (baij->colmap[in[j]] - 1)/bs;
398: #endif
399: if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
400: DisAssemble_MPISBAIJ(mat);
401: col = in[j];
402: }
403: }
404: else col = in[j];
405: MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
406: }
407: }
408: } else {
409: if (!baij->donotstash) {
410: if (roworiented) {
411: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
412: } else {
413: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
414: }
415: }
416: }
417: }
418: return(0);
419: }
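/*
   A usage sketch for the blocked routine above (hypothetical matrix "mat" of
   type MATMPISBAIJ with block size 2 and block row ib <= block column jb).
   With the default row orientation v[] holds the 2x2 block row by row.
*/
#if 0
static PetscErrorCode ExampleSetBlock(Mat mat,PetscInt ib,PetscInt jb)
{
  PetscScalar v[4] = {4.0,1.0,
                      1.0,4.0}; /* one symmetric 2x2 block */
  MatSetValuesBlocked(mat,1,&ib,1,&jb,v,INSERT_VALUES);
  return(0);
}
#endif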
423: PetscErrorCode MatGetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
424: {
425: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
427: PetscInt bs=mat->bs,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
428: PetscInt bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
431: for (i=0; i<m; i++) {
432: if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);
433: if (idxm[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->M-1);
434: if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
435: row = idxm[i] - bsrstart;
436: for (j=0; j<n; j++) {
437: if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column %D",idxn[j]);
438: if (idxn[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->N-1);
439: if (idxn[j] >= bscstart && idxn[j] < bscend){
440: col = idxn[j] - bscstart;
441: MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
442: } else {
443: if (!baij->colmap) {
444: CreateColmap_MPIBAIJ_Private(mat);
445: }
446: #if defined (PETSC_USE_CTABLE)
447: PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
448: data --;
449: #else
450: data = baij->colmap[idxn[j]/bs]-1;
451: #endif
452: if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
453: else {
454: col = data + idxn[j]%bs;
455: MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
456: }
457: }
458: }
459: } else {
460: SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
461: }
462: }
463: return(0);
464: }
468: PetscErrorCode MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
469: {
470: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
472: PetscReal sum[2],*lnorm2;
475: if (baij->size == 1) {
476: MatNorm(baij->A,type,norm);
477: } else {
478: if (type == NORM_FROBENIUS) {
479: PetscMalloc(2*sizeof(PetscReal),&lnorm2);
480: MatNorm(baij->A,type,lnorm2);
481: *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++; /* square of norm(A) */
482: MatNorm(baij->B,type,lnorm2);
483: *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--; /* square of norm(B) */
484: MPI_Allreduce(lnorm2,&sum,2,MPIU_REAL,MPI_SUM,mat->comm);
485: *norm = sqrt(sum[0] + 2*sum[1]);
486: PetscFree(lnorm2);
487: } else if (type == NORM_INFINITY || type == NORM_1) { /* max row/column sum */
488: Mat_SeqSBAIJ *amat=(Mat_SeqSBAIJ*)baij->A->data;
489: Mat_SeqBAIJ *bmat=(Mat_SeqBAIJ*)baij->B->data;
490: PetscReal *rsum,*rsum2,vabs;
491: PetscInt *jj,*garray=baij->garray,rstart=baij->rstart,nz;
492: PetscInt brow,bcol,col,bs=baij->A->bs,row,grow,gcol,mbs=amat->mbs;
493: MatScalar *v;
495: PetscMalloc((2*mat->N+1)*sizeof(PetscReal),&rsum);
496: rsum2 = rsum + mat->N;
497: PetscMemzero(rsum,mat->N*sizeof(PetscReal));
498: /* Amat */
499: v = amat->a; jj = amat->j;
500: for (brow=0; brow<mbs; brow++) {
501: grow = bs*(rstart + brow);
502: nz = amat->i[brow+1] - amat->i[brow];
503: for (bcol=0; bcol<nz; bcol++){
504: gcol = bs*(rstart + *jj); jj++;
505: for (col=0; col<bs; col++){
506: for (row=0; row<bs; row++){
507: vabs = PetscAbsScalar(*v); v++;
508: rsum[gcol+col] += vabs;
509: /* off-diagonal block: its transposed entry contributes as well */
510: if (bcol > 0 && vabs > 0.0) rsum[grow+row] += vabs;
511: }
512: }
513: }
514: }
515: /* Bmat */
516: v = bmat->a; jj = bmat->j;
517: for (brow=0; brow<mbs; brow++) {
518: grow = bs*(rstart + brow);
519: nz = bmat->i[brow+1] - bmat->i[brow];
520: for (bcol=0; bcol<nz; bcol++){
521: gcol = bs*garray[*jj]; jj++;
522: for (col=0; col<bs; col++){
523: for (row=0; row<bs; row++){
524: vabs = PetscAbsScalar(*v); v++;
525: rsum[gcol+col] += vabs;
526: rsum[grow+row] += vabs;
527: }
528: }
529: }
530: }
531: MPI_Allreduce(rsum,rsum2,mat->N,MPIU_REAL,MPI_SUM,mat->comm);
532: *norm = 0.0;
533: for (col=0; col<mat->N; col++) {
534: if (rsum2[col] > *norm) *norm = rsum2[col];
535: }
536: PetscFree(rsum);
537: } else {
538: SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
539: }
540: }
541: return(0);
542: }
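/*
   Reading of the Frobenius branch above: each off-diagonal block of B is
   stored once but appears twice in the symmetric matrix (as B and B^T), so
   the reduction computes
       norm = sqrt( sum_p ||A_p||_F^2 + 2 * sum_p ||B_p||_F^2 )
   where ||A_p||_F from MatNorm() on the SeqSBAIJ block is assumed to already
   account for the entries mirrored inside the diagonal block.
*/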
546: PetscErrorCode MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
547: {
548: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
550: PetscInt nstash,reallocs;
551: InsertMode addv;
554: if (baij->donotstash) {
555: return(0);
556: }
558: /* make sure all processors use the same insert mode, either INSERT_VALUES or ADD_VALUES */
559: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);
560: if (addv == (ADD_VALUES|INSERT_VALUES)) {
561: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
562: }
563: mat->insertmode = addv; /* in case this processor had no cache */
565: MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);
566: MatStashScatterBegin_Private(&mat->bstash,baij->rowners);
567: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
568: PetscLogInfo((0,"MatAssemblyBegin_MPISBAIJ:Stash has %D entries, uses %D mallocs.\n",nstash,reallocs));
569: MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
570: PetscLogInfo((0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs));
571: return(0);
572: }
576: PetscErrorCode MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
577: {
578: Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
579: Mat_SeqSBAIJ *a=(Mat_SeqSBAIJ*)baij->A->data;
580: Mat_SeqBAIJ *b=(Mat_SeqBAIJ*)baij->B->data;
582: PetscInt i,j,rstart,ncols,flg,bs2=baij->bs2;
583: PetscInt *row,*col,other_disassembled;
584: PetscMPIInt n;
585: PetscTruth r1,r2,r3;
586: MatScalar *val;
587: InsertMode addv = mat->insertmode;
591: if (!baij->donotstash) {
592: while (1) {
593: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
594: if (!flg) break;
596: for (i=0; i<n;) {
597: /* Now identify the consecutive vals belonging to the same row */
598: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
599: if (j < n) ncols = j-i;
600: else ncols = n-i;
601: /* Now assemble all these values with a single function call */
602: MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);
603: i = j;
604: }
605: }
606: MatStashScatterEnd_Private(&mat->stash);
607: /* Now process the block stash. Since the values are stashed column-oriented,
608: temporarily clear the roworiented flags, and restore the original flags
609: after the MatSetValues() calls */
610: r1 = baij->roworiented;
611: r2 = a->roworiented;
612: r3 = b->roworiented;
613: baij->roworiented = PETSC_FALSE;
614: a->roworiented = PETSC_FALSE;
615: b->roworiented = PETSC_FALSE;
616: while (1) {
617: MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
618: if (!flg) break;
619:
620: for (i=0; i<n;) {
621: /* Now identify the consecutive vals belonging to the same row */
622: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
623: if (j < n) ncols = j-i;
624: else ncols = n-i;
625: MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
626: i = j;
627: }
628: }
629: MatStashScatterEnd_Private(&mat->bstash);
630: baij->roworiented = r1;
631: a->roworiented = r2;
632: b->roworiented = r3;
633: }
635: MatAssemblyBegin(baij->A,mode);
636: MatAssemblyEnd(baij->A,mode);
638: /* determine if any processor has disassembled; if so we must
639: also disassemble ourselves, in order that we may reassemble. */
640: /*
641: if the nonzero structure of submatrix B cannot change then we know that
642: no processor disassembled, thus we can skip this step
643: */
644: if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
645: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);
646: if (mat->was_assembled && !other_disassembled) {
647: DisAssemble_MPISBAIJ(mat);
648: }
649: }
651: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
652: MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
653: }
654: b->compressedrow.use = PETSC_TRUE;
655: MatAssemblyBegin(baij->B,mode);
656: MatAssemblyEnd(baij->B,mode);
657:
658: if (baij->rowvalues) {
659: PetscFree(baij->rowvalues);
660: baij->rowvalues = 0;
661: }
663: return(0);
664: }
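/*
   Summary of the assembly flow above (descriptive only, no new behavior):
   MatAssemblyBegin() checks that all processes use the same InsertMode and
   posts the sends of stashed off-process entries; MatAssemblyEnd() drains the
   scalar stash and the block stash with MatStashScatterGetMesg_Private(),
   re-inserts each batch of same-row values locally, assembles the A and B
   parts, and builds the multiply scatter contexts on the first final assembly.
*/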
668: static PetscErrorCode MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
669: {
670: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
671: PetscErrorCode ierr;
672: PetscInt bs = mat->bs;
673: PetscMPIInt size = baij->size,rank = baij->rank;
674: PetscTruth iascii,isdraw;
675: PetscViewer sviewer;
676: PetscViewerFormat format;
679: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
680: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
681: if (iascii) {
682: PetscViewerGetFormat(viewer,&format);
683: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
684: MatInfo info;
685: MPI_Comm_rank(mat->comm,&rank);
686: MatGetInfo(mat,MAT_LOCAL,&info);
687: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
688: rank,mat->m,(PetscInt)info.nz_used*bs,(PetscInt)info.nz_allocated*bs,
689: mat->bs,(PetscInt)info.memory);
690: MatGetInfo(baij->A,MAT_LOCAL,&info);
691: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
692: MatGetInfo(baij->B,MAT_LOCAL,&info);
693: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
694: PetscViewerFlush(viewer);
695: VecScatterView(baij->Mvctx,viewer);
696: return(0);
697: } else if (format == PETSC_VIEWER_ASCII_INFO) {
698: PetscViewerASCIIPrintf(viewer," block size is %D\n",bs);
699: return(0);
700: }
701: }
703: if (isdraw) {
704: PetscDraw draw;
705: PetscTruth isnull;
706: PetscViewerDrawGetDraw(viewer,0,&draw);
707: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
708: }
710: if (size == 1) {
711: PetscObjectSetName((PetscObject)baij->A,mat->name);
712: MatView(baij->A,viewer);
713: } else {
714: /* assemble the entire matrix onto first processor. */
715: Mat A;
716: Mat_SeqSBAIJ *Aloc;
717: Mat_SeqBAIJ *Bloc;
718: PetscInt M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
719: MatScalar *a;
721: /* Should this be the same type as mat? */
722: MatCreate(mat->comm,&A);
723: if (!rank) {
724: MatSetSizes(A,M,N,M,N);
725: } else {
726: MatSetSizes(A,0,0,M,N);
727: }
728: MatSetType(A,MATMPISBAIJ);
729: MatMPISBAIJSetPreallocation(A,mat->bs,0,PETSC_NULL,0,PETSC_NULL);
730: PetscLogObjectParent(mat,A);
732: /* copy over the A part */
733: Aloc = (Mat_SeqSBAIJ*)baij->A->data;
734: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
735: PetscMalloc(bs*sizeof(PetscInt),&rvals);
737: for (i=0; i<mbs; i++) {
738: rvals[0] = bs*(baij->rstart + i);
739: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
740: for (j=ai[i]; j<ai[i+1]; j++) {
741: col = (baij->cstart+aj[j])*bs;
742: for (k=0; k<bs; k++) {
743: MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
744: col++; a += bs;
745: }
746: }
747: }
748: /* copy over the B part */
749: Bloc = (Mat_SeqBAIJ*)baij->B->data;
750: ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
751: for (i=0; i<mbs; i++) {
752: rvals[0] = bs*(baij->rstart + i);
753: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
754: for (j=ai[i]; j<ai[i+1]; j++) {
755: col = baij->garray[aj[j]]*bs;
756: for (k=0; k<bs; k++) {
757: MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
758: col++; a += bs;
759: }
760: }
761: }
762: PetscFree(rvals);
763: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
764: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
765: /*
766: Everyone has to call to draw the matrix since the graphics waits are
767: synchronized across all processors that share the PetscDraw object
768: */
769: PetscViewerGetSingleton(viewer,&sviewer);
770: if (!rank) {
771: PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);
772: MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
773: }
774: PetscViewerRestoreSingleton(viewer,&sviewer);
775: MatDestroy(A);
776: }
777: return(0);
778: }
782: PetscErrorCode MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
783: {
785: PetscTruth iascii,isdraw,issocket,isbinary;
788: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
789: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
790: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
791: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
792: if (iascii || isdraw || issocket || isbinary) {
793: MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
794: } else {
795: SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
796: }
797: return(0);
798: }
802: PetscErrorCode MatDestroy_MPISBAIJ(Mat mat)
803: {
804: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
808: #if defined(PETSC_USE_LOG)
809: PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->M,mat->N);
810: #endif
811: MatStashDestroy_Private(&mat->stash);
812: MatStashDestroy_Private(&mat->bstash);
813: PetscFree(baij->rowners);
814: MatDestroy(baij->A);
815: MatDestroy(baij->B);
816: #if defined (PETSC_USE_CTABLE)
817: if (baij->colmap) {PetscTableDelete(baij->colmap);}
818: #else
819: if (baij->colmap) {PetscFree(baij->colmap);}
820: #endif
821: if (baij->garray) {PetscFree(baij->garray);}
822: if (baij->lvec) {VecDestroy(baij->lvec);}
823: if (baij->Mvctx) {VecScatterDestroy(baij->Mvctx);}
824: if (baij->slvec0) {
825: VecDestroy(baij->slvec0);
826: VecDestroy(baij->slvec0b);
827: }
828: if (baij->slvec1) {
829: VecDestroy(baij->slvec1);
830: VecDestroy(baij->slvec1a);
831: VecDestroy(baij->slvec1b);
832: }
833: if (baij->sMvctx) {VecScatterDestroy(baij->sMvctx);}
834: if (baij->rowvalues) {PetscFree(baij->rowvalues);}
835: if (baij->barray) {PetscFree(baij->barray);}
836: if (baij->hd) {PetscFree(baij->hd);}
837: #if defined(PETSC_USE_MAT_SINGLE)
838: if (baij->setvaluescopy) {PetscFree(baij->setvaluescopy);}
839: #endif
840: PetscFree(baij);
842: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
843: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
844: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
845: PetscObjectComposeFunction((PetscObject)mat,"MatMPISBAIJSetPreallocation_C","",PETSC_NULL);
846: return(0);
847: }
851: PetscErrorCode MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
852: {
853: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
855: PetscInt nt,mbs=a->mbs,bs=A->bs;
856: PetscScalar *x,*from,zero=0.0;
857:
859: VecGetLocalSize(xx,&nt);
860: if (nt != A->n) {
861: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
862: }
863: VecGetLocalSize(yy,&nt);
864: if (nt != A->m) {
865: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
866: }
868: /* diagonal part */
869: (*a->A->ops->mult)(a->A,xx,a->slvec1a);
870: VecSet(a->slvec1b,zero);
872: /* subdiagonal part */
873: (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
875: /* copy x into the vec slvec0 */
876: VecGetArray(a->slvec0,&from);
877: VecGetArray(xx,&x);
878: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
879: VecRestoreArray(a->slvec0,&from);
880:
881: VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
882: VecRestoreArray(xx,&x);
883: VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
884:
885: /* superdiagonal part */
886: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
887:
888: return(0);
889: }
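/*
   Reading of the multiply above: slvec0 packs [x_loc ; B^T*x_loc] and slvec1
   starts as [A*x_loc ; 0]; a single forward scatter with ADD_VALUES then
   delivers the ghost values of x into slvec1b and accumulates the B^T*x
   contributions received from other ranks into slvec1a, so the final multadd
   yields y = A*x_loc + B*x_ghost + (scattered B^T*x terms). This replaces the
   two scatters used by the _2comm variant below.
*/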
893: PetscErrorCode MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
894: {
895: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
897: PetscInt nt;
900: VecGetLocalSize(xx,&nt);
901: if (nt != A->n) {
902: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
903: }
904: VecGetLocalSize(yy,&nt);
905: if (nt != A->m) {
906: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
907: }
909: VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
910: /* do diagonal part */
911: (*a->A->ops->mult)(a->A,xx,yy);
912: /* do superdiagonal part */
913: VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
914: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
915: /* do subdiagonal part */
916: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
917: VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
918: VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
920: return(0);
921: }
925: PetscErrorCode MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
926: {
927: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
929: PetscInt mbs=a->mbs,bs=A->bs;
930: PetscScalar *x,*from,zero=0.0;
931:
933: /*
934: PetscSynchronizedPrintf(A->comm," MatMultAdd is called ...\n");
935: PetscSynchronizedFlush(A->comm);
936: */
937: /* diagonal part */
938: (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
939: VecSet(a->slvec1b,zero);
941: /* subdiagonal part */
942: (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
944: /* copy x into the vec slvec0 */
945: VecGetArray(a->slvec0,&from);
946: VecGetArray(xx,&x);
947: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
948: VecRestoreArray(a->slvec0,&from);
949:
950: VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
951: VecRestoreArray(xx,&x);
952: VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);
953:
954: /* superdiagonal part */
955: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
956:
957: return(0);
958: }
962: PetscErrorCode MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
963: {
964: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
968: VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
969: /* do diagonal part */
970: (*a->A->ops->multadd)(a->A,xx,yy,zz);
972: /* do superdiagonal part */
972: VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
973: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
975: /* do subdiagonal part */
976: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
977: VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
978: VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
980: return(0);
981: }
985: PetscErrorCode MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
986: {
990: MatMult(A,xx,yy);
991: return(0);
992: }
996: PetscErrorCode MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
997: {
1001: MatMultAdd(A,xx,yy,zz);
1002: return(0);
1003: }
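/* Because the matrix is symmetric, A^T = A, so the two transpose operations
   above simply forward to MatMult() and MatMultAdd(). */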
1005: /*
1006: This only works correctly for square matrices where the subblock A->A is the
1007: diagonal block
1008: */
1011: PetscErrorCode MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
1012: {
1013: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1017: /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
1018: MatGetDiagonal(a->A,v);
1019: return(0);
1020: }
1024: PetscErrorCode MatScale_MPISBAIJ(Mat A,PetscScalar aa)
1025: {
1026: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1030: MatScale(a->A,aa);
1031: MatScale(a->B,aa);
1032: return(0);
1033: }
1037: PetscErrorCode MatGetRow_MPISBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1038: {
1040: if (matin) SETERRQ(PETSC_ERR_SUP,"MatGetRow is not supported for SBAIJ matrix format");
1041: return(0);
1042: }
1046: PetscErrorCode MatRestoreRow_MPISBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1047: {
1048: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1051: if (!baij->getrowactive) {
1052: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1053: }
1054: baij->getrowactive = PETSC_FALSE;
1055: return(0);
1056: }
1060: PetscErrorCode MatZeroEntries_MPISBAIJ(Mat A)
1061: {
1062: Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1066: MatZeroEntries(l->A);
1067: MatZeroEntries(l->B);
1068: return(0);
1069: }
1073: PetscErrorCode MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1074: {
1075: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1076: Mat A = a->A,B = a->B;
1078: PetscReal isend[5],irecv[5];
1081: info->block_size = (PetscReal)matin->bs;
1082: MatGetInfo(A,MAT_LOCAL,info);
1083: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1084: isend[3] = info->memory; isend[4] = info->mallocs;
1085: MatGetInfo(B,MAT_LOCAL,info);
1086: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1087: isend[3] += info->memory; isend[4] += info->mallocs;
1088: if (flag == MAT_LOCAL) {
1089: info->nz_used = isend[0];
1090: info->nz_allocated = isend[1];
1091: info->nz_unneeded = isend[2];
1092: info->memory = isend[3];
1093: info->mallocs = isend[4];
1094: } else if (flag == MAT_GLOBAL_MAX) {
1095: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);
1096: info->nz_used = irecv[0];
1097: info->nz_allocated = irecv[1];
1098: info->nz_unneeded = irecv[2];
1099: info->memory = irecv[3];
1100: info->mallocs = irecv[4];
1101: } else if (flag == MAT_GLOBAL_SUM) {
1102: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);
1103: info->nz_used = irecv[0];
1104: info->nz_allocated = irecv[1];
1105: info->nz_unneeded = irecv[2];
1106: info->memory = irecv[3];
1107: info->mallocs = irecv[4];
1108: } else {
1109: SETERRQ1(PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1110: }
1111: info->rows_global = (PetscReal)matin->M;
1112: info->columns_global = (PetscReal)matin->N;
1113: info->rows_local = (PetscReal)matin->m;
1114: info->columns_local = (PetscReal)matin->n;
1115: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1116: info->fill_ratio_needed = 0;
1117: info->factor_mallocs = 0;
1118: return(0);
1119: }
1123: PetscErrorCode MatSetOption_MPISBAIJ(Mat A,MatOption op)
1124: {
1125: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1129: switch (op) {
1130: case MAT_NO_NEW_NONZERO_LOCATIONS:
1131: case MAT_YES_NEW_NONZERO_LOCATIONS:
1132: case MAT_COLUMNS_UNSORTED:
1133: case MAT_COLUMNS_SORTED:
1134: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1135: case MAT_KEEP_ZEROED_ROWS:
1136: case MAT_NEW_NONZERO_LOCATION_ERR:
1137: MatSetOption(a->A,op);
1138: MatSetOption(a->B,op);
1139: break;
1140: case MAT_ROW_ORIENTED:
1141: a->roworiented = PETSC_TRUE;
1142: MatSetOption(a->A,op);
1143: MatSetOption(a->B,op);
1144: break;
1145: case MAT_ROWS_SORTED:
1146: case MAT_ROWS_UNSORTED:
1147: case MAT_YES_NEW_DIAGONALS:
1148: PetscLogInfo((A,"Info:MatSetOption_MPISBAIJ:Option ignored\n"));
1149: break;
1150: case MAT_COLUMN_ORIENTED:
1151: a->roworiented = PETSC_FALSE;
1152: MatSetOption(a->A,op);
1153: MatSetOption(a->B,op);
1154: break;
1155: case MAT_IGNORE_OFF_PROC_ENTRIES:
1156: a->donotstash = PETSC_TRUE;
1157: break;
1158: case MAT_NO_NEW_DIAGONALS:
1159: SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1160: case MAT_USE_HASH_TABLE:
1161: a->ht_flag = PETSC_TRUE;
1162: break;
1163: case MAT_NOT_SYMMETRIC:
1164: case MAT_NOT_STRUCTURALLY_SYMMETRIC:
1165: case MAT_HERMITIAN:
1166: SETERRQ(PETSC_ERR_SUP,"Matrix must be symmetric");
1167: case MAT_SYMMETRIC:
1168: case MAT_STRUCTURALLY_SYMMETRIC:
1169: case MAT_NOT_HERMITIAN:
1170: case MAT_SYMMETRY_ETERNAL:
1171: case MAT_NOT_SYMMETRY_ETERNAL:
1172: break;
1173: default:
1174: SETERRQ(PETSC_ERR_SUP,"unknown option");
1175: }
1176: return(0);
1177: }
1181: PetscErrorCode MatTranspose_MPISBAIJ(Mat A,Mat *B)
1182: {
1185: MatDuplicate(A,MAT_COPY_VALUES,B);
1186: return(0);
1187: }
1191: PetscErrorCode MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1192: {
1193: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1194: Mat a=baij->A, b=baij->B;
1196: PetscInt nv,m,n;
1197: PetscTruth flg;
1200: if (ll != rr){
1201: VecEqual(ll,rr,&flg);
1202: if (!flg)
1203: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same");
1204: }
1205: if (!ll) return(0);
1207: MatGetLocalSize(mat,&m,&n);
1208: if (m != n) SETERRQ2(PETSC_ERR_ARG_SIZ,"For symmetric format, local sizes %D %D must be the same",m,n);
1209:
1210: VecGetLocalSize(rr,&nv);
1211: if (nv!=n) SETERRQ(PETSC_ERR_ARG_SIZ,"Left and right vector non-conforming local size");
1213: VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1214:
1215: /* left diagonalscale the off-diagonal part */
1216: (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1217:
1218: /* scale the diagonal part */
1219: (*a->ops->diagonalscale)(a,ll,rr);
1221: /* right diagonalscale the off-diagonal part */
1222: VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1223: (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1224: return(0);
1225: }
1229: PetscErrorCode MatPrintHelp_MPISBAIJ(Mat A)
1230: {
1231: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1232: MPI_Comm comm = A->comm;
1233: static PetscTruth called = PETSC_FALSE;
1234: PetscErrorCode ierr;
1237: if (!a->rank) {
1238: MatPrintHelp_SeqSBAIJ(a->A);
1239: }
1240: if (called) {return(0);} else called = PETSC_TRUE;
1241: (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");
1242: (*PetscHelpPrintf)(comm," -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");
1243: return(0);
1244: }
1248: PetscErrorCode MatSetUnfactored_MPISBAIJ(Mat A)
1249: {
1250: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1254: MatSetUnfactored(a->A);
1255: return(0);
1256: }
1258: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1262: PetscErrorCode MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1263: {
1264: Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1265: Mat a,b,c,d;
1266: PetscTruth flg;
1270: a = matA->A; b = matA->B;
1271: c = matB->A; d = matB->B;
1273: MatEqual(a,c,&flg);
1274: if (flg) {
1275: MatEqual(b,d,&flg);
1276: }
1277: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);
1278: return(0);
1279: }
1283: PetscErrorCode MatSetUpPreallocation_MPISBAIJ(Mat A)
1284: {
1288: MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1289: return(0);
1290: }
1294: PetscErrorCode MatGetSubMatrices_MPISBAIJ(Mat A,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *B[])
1295: {
1297: PetscInt i;
1298: PetscTruth flg;
1301: for (i=0; i<n; i++) {
1302: ISEqual(irow[i],icol[i],&flg);
1303: if (!flg) {
1304: SETERRQ(PETSC_ERR_SUP,"Can only get symmetric submatrix for MPISBAIJ matrices");
1305: }
1306: }
1307: MatGetSubMatrices_MPIBAIJ(A,n,irow,icol,scall,B);
1308: return(0);
1309: }
1310:
1312: /* -------------------------------------------------------------------*/
1313: static struct _MatOps MatOps_Values = {
1314: MatSetValues_MPISBAIJ,
1315: MatGetRow_MPISBAIJ,
1316: MatRestoreRow_MPISBAIJ,
1317: MatMult_MPISBAIJ,
1318: /* 4*/ MatMultAdd_MPISBAIJ,
1319: MatMultTranspose_MPISBAIJ,
1320: MatMultTransposeAdd_MPISBAIJ,
1321: 0,
1322: 0,
1323: 0,
1324: /*10*/ 0,
1325: 0,
1326: 0,
1327: MatRelax_MPISBAIJ,
1328: MatTranspose_MPISBAIJ,
1329: /*15*/ MatGetInfo_MPISBAIJ,
1330: MatEqual_MPISBAIJ,
1331: MatGetDiagonal_MPISBAIJ,
1332: MatDiagonalScale_MPISBAIJ,
1333: MatNorm_MPISBAIJ,
1334: /*20*/ MatAssemblyBegin_MPISBAIJ,
1335: MatAssemblyEnd_MPISBAIJ,
1336: 0,
1337: MatSetOption_MPISBAIJ,
1338: MatZeroEntries_MPISBAIJ,
1339: /*25*/ 0,
1340: 0,
1341: 0,
1342: 0,
1343: 0,
1344: /*30*/ MatSetUpPreallocation_MPISBAIJ,
1345: 0,
1346: 0,
1347: 0,
1348: 0,
1349: /*35*/ MatDuplicate_MPISBAIJ,
1350: 0,
1351: 0,
1352: 0,
1353: 0,
1354: /*40*/ 0,
1355: MatGetSubMatrices_MPISBAIJ,
1356: MatIncreaseOverlap_MPISBAIJ,
1357: MatGetValues_MPISBAIJ,
1358: 0,
1359: /*45*/ MatPrintHelp_MPISBAIJ,
1360: MatScale_MPISBAIJ,
1361: 0,
1362: 0,
1363: 0,
1364: /*50*/ 0,
1365: 0,
1366: 0,
1367: 0,
1368: 0,
1369: /*55*/ 0,
1370: 0,
1371: MatSetUnfactored_MPISBAIJ,
1372: 0,
1373: MatSetValuesBlocked_MPISBAIJ,
1374: /*60*/ 0,
1375: 0,
1376: 0,
1377: MatGetPetscMaps_Petsc,
1378: 0,
1379: /*65*/ 0,
1380: 0,
1381: 0,
1382: 0,
1383: 0,
1384: /*70*/ MatGetRowMax_MPISBAIJ,
1385: 0,
1386: 0,
1387: 0,
1388: 0,
1389: /*75*/ 0,
1390: 0,
1391: 0,
1392: 0,
1393: 0,
1394: /*80*/ 0,
1395: 0,
1396: 0,
1397: 0,
1398: MatLoad_MPISBAIJ,
1399: /*85*/ 0,
1400: 0,
1401: 0,
1402: 0,
1403: 0,
1404: /*90*/ 0,
1405: 0,
1406: 0,
1407: 0,
1408: 0,
1409: /*95*/ 0,
1410: 0,
1411: 0,
1412: 0};
1418: PetscErrorCode PETSCMAT_DLLEXPORT MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1419: {
1421: *a = ((Mat_MPISBAIJ *)A->data)->A;
1422: *iscopy = PETSC_FALSE;
1423: return(0);
1424: }
1430: PetscErrorCode PETSCMAT_DLLEXPORT MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
1431: {
1432: Mat_MPISBAIJ *b;
1434: PetscInt i,mbs,Mbs;
1437: PetscOptionsGetInt(B->prefix,"-mat_block_size",&bs,PETSC_NULL);
1439: if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1440: if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1441: if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1442: if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
1443: if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
1444: if (d_nnz) {
1445: for (i=0; i<B->m/bs; i++) {
1446: if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than -1: local row %D value %D",i,d_nnz[i]);
1447: }
1448: }
1449: if (o_nnz) {
1450: for (i=0; i<B->m/bs; i++) {
1451: if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %D value %D",i,o_nnz[i]);
1452: }
1453: }
1454: B->preallocated = PETSC_TRUE;
1455: PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);
1456: PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);
1457: PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);
1458: PetscMapCreateMPI(B->comm,B->m,B->M,&B->cmap);
1460: b = (Mat_MPISBAIJ*)B->data;
1461: mbs = B->m/bs;
1462: Mbs = B->M/bs;
1463: if (mbs*bs != B->m) {
1464: SETERRQ2(PETSC_ERR_ARG_SIZ,"Number of local rows %D must be divisible by blocksize %D",B->m,bs);
1465: }
1467: B->bs = bs;
1468: b->bs2 = bs*bs;
1469: b->mbs = mbs;
1470: b->nbs = mbs;
1471: b->Mbs = Mbs;
1472: b->Nbs = Mbs;
1474: MPI_Allgather(&b->mbs,1,MPIU_INT,b->rowners+1,1,MPIU_INT,B->comm);
1475: b->rowners[0] = 0;
1476: for (i=2; i<=b->size; i++) {
1477: b->rowners[i] += b->rowners[i-1];
1478: }
1479: b->rstart = b->rowners[b->rank];
1480: b->rend = b->rowners[b->rank+1];
1481: b->cstart = b->rstart;
1482: b->cend = b->rend;
1483: for (i=0; i<=b->size; i++) {
1484: b->rowners_bs[i] = b->rowners[i]*bs;
1485: }
1486: b->rstart_bs = b->rstart*bs;
1487: b->rend_bs = b->rend*bs;
1488:
1489: b->cstart_bs = b->cstart*bs;
1490: b->cend_bs = b->cend*bs;
1491:
1492: MatCreate(PETSC_COMM_SELF,&b->A);
1493: MatSetSizes(b->A,B->m,B->m,B->m,B->m);
1494: MatSetType(b->A,MATSEQSBAIJ);
1495: MatSeqSBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
1496: PetscLogObjectParent(B,b->A);
1498: MatCreate(PETSC_COMM_SELF,&b->B);
1499: MatSetSizes(b->B,B->m,B->M,B->m,B->M);
1500: MatSetType(b->B,MATSEQBAIJ);
1501: MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
1502: PetscLogObjectParent(B,b->B);
1504: /* build cache for off array entries formed */
1505: MatStashCreate_Private(B->comm,bs,&B->bstash);
1507: return(0);
1508: }
1511: /*MC
1512: MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices,
1513: based on block compressed sparse row format. Only the upper triangular portion of the matrix is stored.
1515: Options Database Keys:
1516: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()
1518: Level: beginner
1520: .seealso: MatCreateMPISBAIJ
1521: M*/
1526: PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPISBAIJ(Mat B)
1527: {
1528: Mat_MPISBAIJ *b;
1530: PetscTruth flg;
1534: PetscNew(Mat_MPISBAIJ,&b);
1535: B->data = (void*)b;
1536: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
1538: B->ops->destroy = MatDestroy_MPISBAIJ;
1539: B->ops->view = MatView_MPISBAIJ;
1540: B->mapping = 0;
1541: B->factor = 0;
1542: B->assembled = PETSC_FALSE;
1544: B->insertmode = NOT_SET_VALUES;
1545: MPI_Comm_rank(B->comm,&b->rank);
1546: MPI_Comm_size(B->comm,&b->size);
1548: /* build local table of row and column ownerships */
1549: PetscMalloc(3*(b->size+2)*sizeof(PetscInt),&b->rowners);
1550: b->cowners = b->rowners + b->size + 2;
1551: b->rowners_bs = b->cowners + b->size + 2;
1552: PetscLogObjectMemory(B,3*(b->size+2)*sizeof(PetscInt)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1554: /* build cache for off array entries formed */
1555: MatStashCreate_Private(B->comm,1,&B->stash);
1556: b->donotstash = PETSC_FALSE;
1557: b->colmap = PETSC_NULL;
1558: b->garray = PETSC_NULL;
1559: b->roworiented = PETSC_TRUE;
1561: #if defined(PETSC_USE_MAT_SINGLE)
1562: /* stuff for MatSetValues_XXX in single precision */
1563: b->setvalueslen = 0;
1564: b->setvaluescopy = PETSC_NULL;
1565: #endif
1567: /* stuff used in block assembly */
1568: b->barray = 0;
1570: /* stuff used for matrix vector multiply */
1571: b->lvec = 0;
1572: b->Mvctx = 0;
1573: b->slvec0 = 0;
1574: b->slvec0b = 0;
1575: b->slvec1 = 0;
1576: b->slvec1a = 0;
1577: b->slvec1b = 0;
1578: b->sMvctx = 0;
1580: /* stuff for MatGetRow() */
1581: b->rowindices = 0;
1582: b->rowvalues = 0;
1583: b->getrowactive = PETSC_FALSE;
1585: /* hash table stuff */
1586: b->ht = 0;
1587: b->hd = 0;
1588: b->ht_size = 0;
1589: b->ht_flag = PETSC_FALSE;
1590: b->ht_fact = 0;
1591: b->ht_total_ct = 0;
1592: b->ht_insert_ct = 0;
1594: PetscOptionsHasName(B->prefix,"-mat_use_hash_table",&flg);
1595: if (flg) {
1596: PetscReal fact = 1.39;
1597: MatSetOption(B,MAT_USE_HASH_TABLE);
1598: PetscOptionsGetReal(B->prefix,"-mat_use_hash_table",&fact,PETSC_NULL);
1599: if (fact <= 1.0) fact = 1.39;
1600: MatMPIBAIJSetHashTableFactor(B,fact);
1601: PetscLogInfo((0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact));
1602: }
1603: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1604: "MatStoreValues_MPISBAIJ",
1605: MatStoreValues_MPISBAIJ);
1606: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1607: "MatRetrieveValues_MPISBAIJ",
1608: MatRetrieveValues_MPISBAIJ);
1609: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1610: "MatGetDiagonalBlock_MPISBAIJ",
1611: MatGetDiagonalBlock_MPISBAIJ);
1612: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1613: "MatMPISBAIJSetPreallocation_MPISBAIJ",
1614: MatMPISBAIJSetPreallocation_MPISBAIJ);
1615: B->symmetric = PETSC_TRUE;
1616: B->structurally_symmetric = PETSC_TRUE;
1617: B->symmetric_set = PETSC_TRUE;
1618: B->structurally_symmetric_set = PETSC_TRUE;
1619: return(0);
1620: }
1623: /*MC
1624: MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.
1626: This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1627: and MATMPISBAIJ otherwise.
1629: Options Database Keys:
1630: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()
1632: Level: beginner
1634: .seealso: MatCreateMPISBAIJ,MATSEQSBAIJ,MATMPISBAIJ
1635: M*/
1640: PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_SBAIJ(Mat A)
1641: {
1643: PetscMPIInt size;
1646: PetscObjectChangeTypeName((PetscObject)A,MATSBAIJ);
1647: MPI_Comm_size(A->comm,&size);
1648: if (size == 1) {
1649: MatSetType(A,MATSEQSBAIJ);
1650: } else {
1651: MatSetType(A,MATMPISBAIJ);
1652: }
1653: return(0);
1654: }
1659: /*@C
1660: MatMPISBAIJSetPreallocation - For good matrix assembly performance
1661: the user should preallocate the matrix storage by setting the parameters
1662: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
1663: performance can be increased by more than a factor of 50.
1665: Collective on Mat
1667: Input Parameters:
1668: + A - the matrix
1669: . bs - size of block
1670: . d_nz - number of block nonzeros per block row in diagonal portion of local
1671: submatrix (same for all local rows)
1672: . d_nnz - array containing the number of block nonzeros in the various block rows
1673: in the upper triangular and diagonal part of the diagonal portion of the local
1674: submatrix (possibly different for each block row) or PETSC_NULL. You must leave room
1675: for the diagonal entry even if it is zero.
1676: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
1677: submatrix (same for all local rows).
1678: - o_nnz - array containing the number of nonzeros in the various block rows of the
1679: off-diagonal portion of the local submatrix (possibly different for
1680: each block row) or PETSC_NULL.
1683: Options Database Keys:
1684: . -mat_no_unroll - uses code that does not unroll the loops in the
1685: block calculations (much slower)
1686: . -mat_block_size - size of the blocks to use
1688: Notes:
1690: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1691: then it must be used on all processors that share the object for that argument.
1693: If the *_nnz parameter is given then the *_nz parameter is ignored
1695: Storage Information:
1696: For a square global matrix we define each processor's diagonal portion
1697: to be its local rows and the corresponding columns (a square submatrix);
1698: each processor's off-diagonal portion encompasses the remainder of the
1699: local matrix (a rectangular submatrix).
1701: The user can specify preallocated storage for the diagonal part of
1702: the local submatrix with either d_nz or d_nnz (not both). Set
1703: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1704: memory allocation. Likewise, specify preallocated storage for the
1705: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1707: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1708: the figure below we depict these three local rows and all columns (0-11).
1710: .vb
1711: 0 1 2 3 4 5 6 7 8 9 10 11
1712: -------------------
1713: row 3 | o o o d d d o o o o o o
1714: row 4 | o o o d d d o o o o o o
1715: row 5 | o o o d d d o o o o o o
1716: -------------------
1717: .ve
1718:
1719: Thus, any entries in the d locations are stored in the d (diagonal)
1720: submatrix, and any entries in the o locations are stored in the
1721: o (off-diagonal) submatrix. Note that the d matrix is stored in
1722: MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1724: Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1725: plus the diagonal part of the d matrix,
1726: and o_nz should indicate the number of block nonzeros per row in the o matrix.
1727: In general, for PDE problems in which most nonzeros are near the diagonal,
1728: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
1729: or you will get TERRIBLE performance; see the users' manual chapter on
1730: matrices.
1732: Level: intermediate
1734: .keywords: matrix, block, aij, compressed row, sparse, parallel
1736: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1737: @*/
1738: PetscErrorCode PETSCMAT_DLLEXPORT MatMPISBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
1739: {
1740: PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
1743: PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",(void (**)(void))&f);
1744: if (f) {
1745: (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
1746: }
1747: return(0);
1748: }
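/*
   A usage sketch (hypothetical sizes): preallocate a MATMPISBAIJ matrix with
   block size 2, at most 3 block nonzeros per block row in the upper triangular
   plus diagonal part of the diagonal portion, and 1 in the off-diagonal
   portion.
*/
#if 0
static PetscErrorCode ExamplePreallocate(MPI_Comm comm,Mat *A)
{
  MatCreate(comm,A);
  MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,100,100);
  MatSetType(*A,MATMPISBAIJ);
  MatMPISBAIJSetPreallocation(*A,2,3,PETSC_NULL,1,PETSC_NULL);
  return(0);
}
#endif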
1752: /*@C
1753: MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1754: (block compressed row). For good matrix assembly performance
1755: the user should preallocate the matrix storage by setting the parameters
1756: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
1757: performance can be increased by more than a factor of 50.
1759: Collective on MPI_Comm
1761: Input Parameters:
1762: + comm - MPI communicator
1763: . bs - size of block
1764: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1765: This value should be the same as the local size used in creating the
1766: y vector for the matrix-vector product y = Ax.
1767: . n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1768: This value should be the same as the local size used in creating the
1769: x vector for the matrix-vector product y = Ax.
1770: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1771: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1772: . d_nz - number of block nonzeros per block row in diagonal portion of local
1773: submatrix (same for all local rows)
1774: . d_nnz - array containing the number of block nonzeros in the various block rows
1775: in the upper triangular portion of the diagonal portion of the local
1776: submatrix (possibly different for each block row) or PETSC_NULL.
1777: You must leave room for the diagonal entry even if it is zero.
1778: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
1779: submatrix (same for all local rows).
1780: - o_nnz - array containing the number of nonzeros in the various block rows of the
1781: off-diagonal portion of the local submatrix (possibly different for
1782: each block row) or PETSC_NULL.
1784: Output Parameter:
1785: . A - the matrix
1787: Options Database Keys:
1788: . -mat_no_unroll - uses code that does not unroll the loops in the
1789: block calculations (much slower)
1790: . -mat_block_size - size of the blocks to use
1791: . -mat_mpi - use the parallel matrix data structures even on one processor
1792: (defaults to using SeqSBAIJ format on one processor)
1794: Notes:
1795: The number of rows and columns must be divisible by blocksize.
1797: The user MUST specify either the local or global matrix dimensions
1798: (possibly both).
1800: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1801: then it must be used on all processors that share the object for that argument.
1803: If the *_nnz parameter is given then the *_nz parameter is ignored
1805: Storage Information:
1806: For a square global matrix we define each processor's diagonal portion
1807: to be its local rows and the corresponding columns (a square submatrix);
1808: each processor's off-diagonal portion encompasses the remainder of the
1809: local matrix (a rectangular submatrix).
1811: The user can specify preallocated storage for the diagonal part of
1812: the local submatrix with either d_nz or d_nnz (not both). Set
1813: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1814: memory allocation. Likewise, specify preallocated storage for the
1815: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1817: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1818: the figure below we depict these three local rows and all columns (0-11).
1820: .vb
1821: 0 1 2 3 4 5 6 7 8 9 10 11
1822: -------------------
1823: row 3 | o o o d d d o o o o o o
1824: row 4 | o o o d d d o o o o o o
1825: row 5 | o o o d d d o o o o o o
1826: -------------------
1827: .ve
1828:
1829: Thus, any entries in the d locations are stored in the d (diagonal)
1830: submatrix, and any entries in the o locations are stored in the
1831: o (off-diagonal) submatrix. Note that the d matrix is stored in
1832: MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1834: Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1835: plus the diagonal part of the d matrix,
1836: and o_nz should indicate the number of block nonzeros per row in the o matrix.
1837: In general, for PDE problems in which most nonzeros are near the diagonal,
1838: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
1839: or you will get TERRIBLE performance; see the users' manual chapter on
1840: matrices.
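
   For example, if every marked position in the figure above were a nonzero and
   bs=1, only the upper triangular part is stored, so one could preallocate with
.vb
      d_nnz = {3, 2, 1}    (row 3 keeps columns 3,4,5; row 4 keeps 4,5; row 5 keeps 5)
      o_nnz = {6, 6, 6}    (columns 6 through 11 for each row; columns 0-2 lie
                            below the diagonal and are not stored)
.ve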
1842: Level: intermediate
1844: .keywords: matrix, block, aij, compressed row, sparse, parallel
1846: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1847: @*/
1849: PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPISBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
1850: {
1852: PetscMPIInt size;
1855: MatCreate(comm,A);
1856: MatSetSizes(*A,m,n,M,N);
1857: MPI_Comm_size(comm,&size);
1858: if (size > 1) {
1859: MatSetType(*A,MATMPISBAIJ);
1860: MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
1861: } else {
1862: MatSetType(*A,MATSEQSBAIJ);
1863: MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
1864: }
1865: return(0);
1866: }
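/*
   A hedged example: creating a global n x n symmetric tridiagonal matrix with
   bs=1 (n is a placeholder). Each block row has at most 2 stored entries in the
   upper triangular diagonal portion (the diagonal and the superdiagonal) and at
   most 1 entry in the off-diagonal portion (at the boundary between processes):
.vb
      Mat A;
      MatCreateMPISBAIJ(PETSC_COMM_WORLD,1,PETSC_DECIDE,PETSC_DECIDE,n,n,
                        2,PETSC_NULL,1,PETSC_NULL,&A);
.ve
*/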
1871: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1872: {
1873: Mat mat;
1874: Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1876: PetscInt len=0,nt,bs=matin->bs,mbs=oldmat->mbs;
1877: PetscScalar *array;
1880: *newmat = 0;
1881: MatCreate(matin->comm,&mat);
1882: MatSetSizes(mat,matin->m,matin->n,matin->M,matin->N);
1883: MatSetType(mat,matin->type_name);
1884: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
1885:
1886: mat->factor = matin->factor;
1887: mat->preallocated = PETSC_TRUE;
1888: mat->assembled = PETSC_TRUE;
1889: mat->insertmode = NOT_SET_VALUES;
1891: a = (Mat_MPISBAIJ*)mat->data;
1892: mat->bs = matin->bs;
1893: a->bs2 = oldmat->bs2;
1894: a->mbs = oldmat->mbs;
1895: a->nbs = oldmat->nbs;
1896: a->Mbs = oldmat->Mbs;
1897: a->Nbs = oldmat->Nbs;
1898:
1899: a->rstart = oldmat->rstart;
1900: a->rend = oldmat->rend;
1901: a->cstart = oldmat->cstart;
1902: a->cend = oldmat->cend;
1903: a->size = oldmat->size;
1904: a->rank = oldmat->rank;
1905: a->donotstash = oldmat->donotstash;
1906: a->roworiented = oldmat->roworiented;
1907: a->rowindices = 0;
1908: a->rowvalues = 0;
1909: a->getrowactive = PETSC_FALSE;
1910: a->barray = 0;
1911: a->rstart_bs = oldmat->rstart_bs;
1912: a->rend_bs = oldmat->rend_bs;
1913: a->cstart_bs = oldmat->cstart_bs;
1914: a->cend_bs = oldmat->cend_bs;
1916: /* hash table stuff */
1917: a->ht = 0;
1918: a->hd = 0;
1919: a->ht_size = 0;
1920: a->ht_flag = oldmat->ht_flag;
1921: a->ht_fact = oldmat->ht_fact;
1922: a->ht_total_ct = 0;
1923: a->ht_insert_ct = 0;
1924:
1925: PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(PetscInt));
1926: MatStashCreate_Private(matin->comm,1,&mat->stash);
1927: MatStashCreate_Private(matin->comm,matin->bs,&mat->bstash);
1928: if (oldmat->colmap) {
1929: #if defined (PETSC_USE_CTABLE)
1930: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
1931: #else
1932: PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
1933: PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
1934: PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
1935: #endif
1936: } else a->colmap = 0;
1938: if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
1939: PetscMalloc(len*sizeof(PetscInt),&a->garray);
1940: PetscLogObjectMemory(mat,len*sizeof(PetscInt));
1941: PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
1942: } else a->garray = 0;
1943:
1944: VecDuplicate(oldmat->lvec,&a->lvec);
1945: PetscLogObjectParent(mat,a->lvec);
1946: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
1947: PetscLogObjectParent(mat,a->Mvctx);
1949: VecDuplicate(oldmat->slvec0,&a->slvec0);
1950: PetscLogObjectParent(mat,a->slvec0);
1951: VecDuplicate(oldmat->slvec1,&a->slvec1);
1952: PetscLogObjectParent(mat,a->slvec1);
1954: VecGetLocalSize(a->slvec1,&nt);
1955: VecGetArray(a->slvec1,&array);
1956: VecCreateSeqWithArray(PETSC_COMM_SELF,bs*mbs,array,&a->slvec1a);
1957: VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec1b);
1958: VecRestoreArray(a->slvec1,&array);
1959: VecGetArray(a->slvec0,&array);
1960: VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec0b);
1961: VecRestoreArray(a->slvec0,&array);
1964: PetscLogObjectParent(mat,a->slvec0b);
1965: PetscLogObjectParent(mat,a->slvec1a);
1966: PetscLogObjectParent(mat,a->slvec1b);
1968: /* VecScatterCopy() for sMvctx is not written yet, so instead simply take a reference to the original scatter: */
1969: PetscObjectReference((PetscObject)oldmat->sMvctx);
1970: a->sMvctx = oldmat->sMvctx;
1971: PetscLogObjectParent(mat,a->sMvctx);
1973: MatDuplicate(oldmat->A,cpvalues,&a->A);
1974: PetscLogObjectParent(mat,a->A);
1975: MatDuplicate(oldmat->B,cpvalues,&a->B);
1976: PetscLogObjectParent(mat,a->B);
1977: PetscFListDuplicate(matin->qlist,&mat->qlist);
1978: *newmat = mat;
1979: return(0);
1980: }
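/*
   The routine above is reached through the generic interface; a minimal sketch:
.vb
      Mat B;
      MatDuplicate(A,MAT_COPY_VALUES,&B);
.ve
   Passing MAT_DO_NOT_COPY_VALUES instead duplicates only the nonzero structure.
*/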
1982: #include petscsys.h
1986: PetscErrorCode MatLoad_MPISBAIJ(PetscViewer viewer, MatType type,Mat *newmat)
1987: {
1988: Mat A;
1990: PetscInt i,nz,j,rstart,rend;
1991: PetscScalar *vals,*buf;
1992: MPI_Comm comm = ((PetscObject)viewer)->comm;
1993: MPI_Status status;
1994: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag,*sndcounts = 0,*browners,maxnz,*rowners;
1995: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
1996: PetscInt *locrowlens,*procsnz = 0,jj,*mycols,*ibuf;
1997: PetscInt bs=1,Mbs,mbs,extra_rows;
1998: PetscInt *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
1999: PetscInt dcount,kmax,k,nzcount,tmp;
2000: int fd;
2001:
2003: PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);
2005: MPI_Comm_size(comm,&size);
2006: MPI_Comm_rank(comm,&rank);
2007: if (!rank) {
2008: PetscViewerBinaryGetDescriptor(viewer,&fd);
2009: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2010: if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not a matrix object");
2011: if (header[3] < 0) {
2012: SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2013: }
2014: }
2016: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2017: M = header[1]; N = header[2];
2019: if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2021: /*
2022: This code adds extra rows to make sure the number of rows is
2023: divisible by the blocksize
2024: */
2025: Mbs = M/bs;
2026: extra_rows = bs - M + bs*(Mbs);
2027: if (extra_rows == bs) extra_rows = 0;
2028: else Mbs++;
2029: if (extra_rows && !rank) {
2030: PetscLogInfo((0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n"));
2031: }
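  /* Worked example: M=10 and bs=3 give Mbs=3 and extra_rows = 3-10+9 = 2;
     since 2 != bs, Mbs becomes 4 and the two padded rows (assigned unit
     diagonal entries further below) bring the row count to 12 */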
2033: /* determine ownership of all rows */
2034: mbs = Mbs/size + ((Mbs % size) > rank);
2035: m = mbs*bs;
2036: PetscMalloc(2*(size+2)*sizeof(PetscMPIInt),&rowners);
2037: browners = rowners + size + 1;
2038: MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2039: rowners[0] = 0;
2040: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2041: for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
2042: rstart = rowners[rank];
2043: rend = rowners[rank+1];
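  /* Worked example: Mbs=10 block rows on size=3 processes yield mbs=4,3,3
     on ranks 0,1,2, so rowners holds the prefix sums {0,4,7,10} and
     browners the same values scaled by bs */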
2044:
2045: /* distribute row lengths to all processors */
2046: PetscMalloc((rend-rstart)*bs*sizeof(PetscInt),&locrowlens);
2047: if (!rank) {
2048: PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
2049: PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2050: for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2051: PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2052: for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2053: MPI_Scatterv(rowlengths,sndcounts,browners,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2054: PetscFree(sndcounts);
2055: } else {
2056: MPI_Scatterv(0,0,0,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2057: }
2058:
2059: if (!rank) { /* procs[0] */
2060: /* calculate the number of nonzeros on each processor */
2061: PetscMalloc(size*sizeof(PetscInt),&procsnz);
2062: PetscMemzero(procsnz,size*sizeof(PetscInt));
2063: for (i=0; i<size; i++) {
2064: for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2065: procsnz[i] += rowlengths[j];
2066: }
2067: }
2068: PetscFree(rowlengths);
2069:
2070: /* determine max buffer needed and allocate it */
2071: maxnz = 0;
2072: for (i=0; i<size; i++) {
2073: maxnz = PetscMax(maxnz,procsnz[i]);
2074: }
2075: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
2077: /* read in my part of the matrix column indices */
2078: nz = procsnz[0];
2079: PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2080: mycols = ibuf;
2081: if (size == 1) nz -= extra_rows;
2082: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2083: if (size == 1) for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2085: /* read in the column indices for all the other processors (except the last) and ship them off */
2086: for (i=1; i<size-1; i++) {
2087: nz = procsnz[i];
2088: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2089: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2090: }
2091: /* read in the column indices for the last proc */
2092: if (size != 1) {
2093: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
2094: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2095: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2096: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2097: }
2098: PetscFree(cols);
2099: } else { /* procs[i], i>0 */
2100: /* determine buffer space needed for message */
2101: nz = 0;
2102: for (i=0; i<m; i++) {
2103: nz += locrowlens[i];
2104: }
2105: PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2106: mycols = ibuf;
2107: /* receive message of column indices */
2108: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2109: MPI_Get_count(&status,MPIU_INT,&maxnz);
2110: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"message length does not match the number of nonzeros expected from the file");
2111: }
2113: /* loop over local rows, determining number of off diagonal entries */
2114: PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&dlens);
2115: odlens = dlens + (rend-rstart);
2116: PetscMalloc(3*Mbs*sizeof(PetscInt),&mask);
2117: PetscMemzero(mask,3*Mbs*sizeof(PetscInt));
2118: masked1 = mask + Mbs;
2119: masked2 = masked1 + Mbs;
2120: rowcount = 0; nzcount = 0;
2121: for (i=0; i<mbs; i++) {
2122: dcount = 0;
2123: odcount = 0;
2124: for (j=0; j<bs; j++) {
2125: kmax = locrowlens[rowcount];
2126: for (k=0; k<kmax; k++) {
2127: tmp = mycols[nzcount++]/bs; /* block col. index */
2128: if (!mask[tmp]) {
2129: mask[tmp] = 1;
2130: if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2131: else masked1[dcount++] = tmp; /* entry in diag portion */
2132: }
2133: }
2134: rowcount++;
2135: }
2136:
2137: dlens[i] = dcount; /* d_nzz[i] */
2138: odlens[i] = odcount; /* o_nzz[i] */
2140: /* zero out the mask elements we set */
2141: for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2142: for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2143: }
2144:
2145: /* create our matrix */
2146: MatCreate(comm,&A);
2147: MatSetSizes(A,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
2148: MatSetType(A,type);
2149: MatMPISBAIJSetPreallocation(A,bs,0,dlens,0,odlens);
2150: MatSetOption(A,MAT_COLUMNS_SORTED);
2151:
2152: if (!rank) {
2153: PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2154: /* read in my part of the matrix numerical values */
2155: nz = procsnz[0];
2156: vals = buf;
2157: mycols = ibuf;
2158: if (size == 1) nz -= extra_rows;
2159: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2160: if (size == 1) for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2162: /* insert into matrix */
2163: jj = rstart*bs;
2164: for (i=0; i<m; i++) {
2165: MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2166: mycols += locrowlens[i];
2167: vals += locrowlens[i];
2168: jj++;
2169: }
2171: /* read in the values for the other processors (except the last one) and ship them out */
2172: for (i=1; i<size-1; i++) {
2173: nz = procsnz[i];
2174: vals = buf;
2175: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2176: MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
2177: }
2178: /* the last proc */
2179: if (size != 1){
2180: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
2181: vals = buf;
2182: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2183: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2184: MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);
2185: }
2186: PetscFree(procsnz);
2188: } else {
2189: /* receive numeric values */
2190: PetscMalloc(nz*sizeof(PetscScalar),&buf);
2192: /* receive message of values */
2193: vals = buf;
2194: mycols = ibuf;
2195: MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
2196: MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2197: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"message length does not match the number of nonzeros expected from the file");
2199: /* insert into matrix */
2200: jj = rstart*bs;
2201: for (i=0; i<m; i++) {
2202: MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2203: mycols += locrowlens[i];
2204: vals += locrowlens[i];
2205: jj++;
2206: }
2207: }
2209: PetscFree(locrowlens);
2210: PetscFree(buf);
2211: PetscFree(ibuf);
2212: PetscFree(rowners);
2213: PetscFree(dlens);
2214: PetscFree(mask);
2215: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2216: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2217: *newmat = A;
2218: return(0);
2219: }
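/*
   A minimal loading sketch (the file name is a placeholder; assumes the file
   was written with MatView() on a binary viewer):
.vb
      PetscViewer viewer;
      Mat         A;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
      MatLoad(viewer,MATMPISBAIJ,&A);
      PetscViewerDestroy(viewer);
.ve
   The block size used by the loader is selected with -matload_block_size <bs>.
*/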
2223: /*XXXXX@
2224: MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2226: Input Parameters:
2227: + mat - the matrix
2228: - fact - factor
2230: Collective on Mat
2232: Level: advanced
2234: Notes:
2235: This can also be set by the command line option: -mat_use_hash_table fact
2237: .keywords: matrix, hashtable, factor, HT
2239: .seealso: MatSetOption()
2240: @XXXXX*/
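/*
   Since only the upper triangular part of the matrix is stored, an entry of
   the off-diagonal block B contributes to the row maximum of its local row
   and, by symmetry, to the row owned by a later process that corresponds to
   its global column. The work array below accumulates these column-wise
   maxima, which are then sent to their owning processes and merged into v.
*/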
2245: PetscErrorCode MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2246: {
2247: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2248: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(a->B)->data;
2249: PetscReal atmp;
2250: PetscReal *work,*svalues,*rvalues;
2252: PetscInt i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2253: PetscMPIInt rank,size;
2254: PetscInt *rowners_bs,dest,count,source;
2255: PetscScalar *va;
2256: MatScalar *ba;
2257: MPI_Status stat;
2260: MatGetRowMax(a->A,v);
2261: VecGetArray(v,&va);
2263: MPI_Comm_size(A->comm,&size);
2264: MPI_Comm_rank(A->comm,&rank);
2266: bs = A->bs;
2267: mbs = a->mbs;
2268: Mbs = a->Mbs;
2269: ba = b->a;
2270: bi = b->i;
2271: bj = b->j;
2273: /* find ownerships */
2274: rowners_bs = a->rowners_bs;
2276: /* each proc creates an array to be distributed */
2277: PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2278: PetscMemzero(work,bs*Mbs*sizeof(PetscReal));
2280: /* row_max for B */
2281: if (rank != size-1){
2282: for (i=0; i<mbs; i++) {
2283: ncols = bi[1] - bi[0]; bi++;
2284: brow = bs*i;
2285: for (j=0; j<ncols; j++){
2286: bcol = bs*(*bj);
2287: for (kcol=0; kcol<bs; kcol++){
2288: col = bcol + kcol; /* local col index */
2289: col += rowners_bs[rank+1]; /* global col index */
2290: for (krow=0; krow<bs; krow++){
2291: atmp = PetscAbsScalar(*ba); ba++;
2292: row = brow + krow; /* local row index */
2293: if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2294: if (work[col] < atmp) work[col] = atmp;
2295: }
2296: }
2297: bj++;
2298: }
2299: }
2301: /* send values to its owners */
2302: for (dest=rank+1; dest<size; dest++){
2303: svalues = work + rowners_bs[dest];
2304: count = rowners_bs[dest+1]-rowners_bs[dest];
2305: MPI_Send(svalues,count,MPIU_REAL,dest,rank,A->comm);
2306: }
2307: }
2308:
2309: /* receive values */
2310: if (rank){
2311: rvalues = work;
2312: count = rowners_bs[rank+1]-rowners_bs[rank];
2313: for (source=0; source<rank; source++){
2314: MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,A->comm,&stat);
2315: /* process values */
2316: for (i=0; i<count; i++){
2317: if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2318: }
2319: }
2320: }
2322: VecRestoreArray(v,&va);
2323: PetscFree(work);
2324: return(0);
2325: }
2329: PetscErrorCode MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2330: {
2331: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
2333: PetscInt mbs=mat->mbs,bs=matin->bs;
2334: PetscScalar mone=-1.0,*x,*b,*ptr,zero=0.0;
2335: Vec bb1;
2336:
2338: if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2339: if (bs > 1)
2340: SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2342: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2343: if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2344: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2345: its--;
2346: }
2348: VecDuplicate(bb,&bb1);
2349: while (its--){
2350:
2351: /* lower triangular part: slvec0b = - B^T*xx */
2352: (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2353:
2354: /* copy xx into slvec0a */
2355: VecGetArray(mat->slvec0,&ptr);
2356: VecGetArray(xx,&x);
2357: PetscMemcpy(ptr,x,bs*mbs*sizeof(PetscScalar));
2358: VecRestoreArray(mat->slvec0,&ptr);
2360: VecScale(mat->slvec0,mone);
2362: /* copy bb into slvec1a */
2363: VecGetArray(mat->slvec1,&ptr);
2364: VecGetArray(bb,&b);
2365: PetscMemcpy(ptr,b,bs*mbs*sizeof(PetscScalar));
2366: VecRestoreArray(mat->slvec1,&ptr);
2368: /* set slvec1b = 0 */
2369: VecSet(mat->slvec1b,zero);
2371: VecScatterBegin(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);
2372: VecRestoreArray(xx,&x);
2373: VecRestoreArray(bb,&b);
2374: VecScatterEnd(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);
2376: /* upper triangular part: bb1 = bb1 - B*x */
2377: (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2378:
2379: /* local diagonal sweep */
2380: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2381: }
2382: VecDestroy(bb1);
2383: } else {
2384: SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2385: }
2386: return(0);
2387: }
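/*
   A hedged sketch of invoking this kernel through the public interface,
   assuming the MatRelax() of this PETSc generation mirrors the argument list
   above (one global iteration, one local symmetric sweep, omega=1.0):
.vb
      MatRelax(A,b,1.0,SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS,0.0,1,1,x);
.ve
*/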
2391: PetscErrorCode MatRelax_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2392: {
2393: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
2395: PetscScalar mone=-1.0;
2396: Vec lvec1,bb1;
2397:
2399: if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2400: if (matin->bs > 1)
2401: SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2403: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2404: if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2405: (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2406: its--;
2407: }
2409: VecDuplicate(mat->lvec,&lvec1);
2410: VecDuplicate(bb,&bb1);
2411: while (its--){
2412: VecScatterBegin(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);
2413:
2414: /* lower triangular part: bb1 = bb - B^T*xx */
2415: (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2416: VecScale(lvec1,mone);
2418: VecScatterEnd(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);
2419: VecCopy(bb,bb1);
2420: VecScatterBegin(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);
2422: /* upper triangular part: bb1 = bb1 - B*x */
2423: VecScale(mat->lvec,mone);
2424: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);
2426: VecScatterEnd(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);
2427:
2428: /* diagonal sweep */
2429: (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2430: }
2431: VecDestroy(lvec1);
2432: VecDestroy(bb1);
2433: } else {
2434: SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2435: }
2436: return(0);
2437: }