Actual source code: mpibaij.c
1: #define PETSCMAT_DLL
3: #include "src/mat/impls/baij/mpi/mpibaij.h"
5: EXTERN PetscErrorCode MatSetUpMultiply_MPIBAIJ(Mat);
6: EXTERN PetscErrorCode DisAssemble_MPIBAIJ(Mat);
7: EXTERN PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat,PetscInt,IS[],PetscInt);
8: EXTERN PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat *[]);
9: EXTERN PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt [],PetscScalar []);
10: EXTERN PetscErrorCode MatSetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
11: EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
12: EXTERN PetscErrorCode MatGetRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
13: EXTERN PetscErrorCode MatRestoreRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
14: EXTERN PetscErrorCode MatPrintHelp_SeqBAIJ(Mat);
15: EXTERN PetscErrorCode MatZeroRows_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscScalar);
17: /* UGLY, ugly, ugly
18: When MatScalar == PetscScalar, the function MatSetValuesBlocked_MPIBAIJ_MatScalar() does
19: not exist as a separate function. Otherwise ..._MatScalar() takes matrix elements that are
20: already in single precision and inserts them into the single-precision data structure, while
21: MatSetValuesBlocked_MPIBAIJ() converts the entries to single precision and then calls
22: ..._MatScalar() to put them into the single-precision data structures.
23: */
24: #if defined(PETSC_USE_MAT_SINGLE)
25: EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ_MatScalar(Mat,PetscInt,const PetscInt*,PetscInt,const PetscInt*,const MatScalar*,InsertMode);
26: EXTERN PetscErrorCode MatSetValues_MPIBAIJ_MatScalar(Mat,PetscInt,const PetscInt*,PetscInt,const PetscInt*,const MatScalar*,InsertMode);
27: EXTERN PetscErrorCode MatSetValuesBlocked_MPIBAIJ_MatScalar(Mat,PetscInt,const PetscInt*,PetscInt,const PetscInt*,const MatScalar*,InsertMode);
28: EXTERN PetscErrorCode MatSetValues_MPIBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt*,PetscInt,const PetscInt*,const MatScalar*,InsertMode);
29: EXTERN PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt*,PetscInt,const PetscInt*,const MatScalar*,InsertMode);
30: #else
31: #define MatSetValuesBlocked_SeqBAIJ_MatScalar MatSetValuesBlocked_SeqBAIJ
32: #define MatSetValues_MPIBAIJ_MatScalar MatSetValues_MPIBAIJ
33: #define MatSetValuesBlocked_MPIBAIJ_MatScalar MatSetValuesBlocked_MPIBAIJ
34: #define MatSetValues_MPIBAIJ_HT_MatScalar MatSetValues_MPIBAIJ_HT
35: #define MatSetValuesBlocked_MPIBAIJ_HT_MatScalar MatSetValuesBlocked_MPIBAIJ_HT
36: #endif
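/*
   Editorial sketch (not in the original source): the net effect of the block
   above. With PETSC_USE_MAT_SINGLE, MatSetValues_MPIBAIJ() (defined later in
   this file) copies the PetscScalar input into a MatScalar buffer before
   forwarding; otherwise the ..._MatScalar names are plain aliases, so the two
   calls below invoke the same function.
*/
#if 0
   MatSetValues_MPIBAIJ(mat,m,im,n,in,v,addv);            /* public entry point */
   MatSetValues_MPIBAIJ_MatScalar(mat,m,im,n,in,v,addv);  /* alias when MatScalar == PetscScalar */
#endif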
40: PetscErrorCode MatGetRowMax_MPIBAIJ(Mat A,Vec v)
41: {
42: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
44: PetscInt i;
45: PetscScalar *va,*vb;
46: Vec vtmp;
49:
50: MatGetRowMax(a->A,v);
51: VecGetArray(v,&va);
53: VecCreateSeq(PETSC_COMM_SELF,A->m,&vtmp);
54: MatGetRowMax(a->B,vtmp);
55: VecGetArray(vtmp,&vb);
57: for (i=0; i<A->m; i++){
58: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) va[i] = vb[i];
59: }
61: VecRestoreArray(v,&va);
62: VecRestoreArray(vtmp,&vb);
63: VecDestroy(vtmp);
64:
65: return(0);
66: }
71: PetscErrorCode PETSCMAT_DLLEXPORT MatStoreValues_MPIBAIJ(Mat mat)
72: {
73: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ *)mat->data;
77: MatStoreValues(aij->A);
78: MatStoreValues(aij->B);
79: return(0);
80: }
86: PetscErrorCode PETSCMAT_DLLEXPORT MatRetrieveValues_MPIBAIJ(Mat mat)
87: {
88: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ *)mat->data;
92: MatRetrieveValues(aij->A);
93: MatRetrieveValues(aij->B);
94: return(0);
95: }
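/*
   Editorial sketch (not in the original source): the intended call cycle for
   the two routines above. MatStoreValues() requires that the nonzero pattern
   be frozen first; "mat" is assumed already preallocated and assembled once.
*/
#if 0
   MatSetOption(mat,MAT_NO_NEW_NONZERO_LOCATIONS);
   MatStoreValues(mat);      /* save a copy of the numerical values */
   /* ... overwrite values with MatSetValues() plus assembly ... */
   MatRetrieveValues(mat);   /* restore the saved values */
#endif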
98: /*
99: Local utility routine that creates a mapping from the global column
100: number to the local number in the off-diagonal part of the local
101: storage of the matrix. This is done in a non-scalable way since the
102: length of colmap equals the global matrix length.
103: */
106: PetscErrorCode CreateColmap_MPIBAIJ_Private(Mat mat)
107: {
108: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
109: Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data;
111: PetscInt nbs = B->nbs,i,bs=mat->bs;
114: #if defined (PETSC_USE_CTABLE)
115: PetscTableCreate(baij->nbs,&baij->colmap);
116: for (i=0; i<nbs; i++){
117: PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1);
118: }
119: #else
120: PetscMalloc((baij->Nbs+1)*sizeof(PetscInt),&baij->colmap);
121: PetscLogObjectMemory(mat,baij->Nbs*sizeof(PetscInt));
122: PetscMemzero(baij->colmap,baij->Nbs*sizeof(PetscInt));
123: for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1;
124: #endif
125: return(0);
126: }
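/*
   Editorial sketch (not in the original source): how the colmap built above
   is consumed (compare MatSetValues_MPIBAIJ_MatScalar below). Entries are
   stored shifted by one so that zero means "block column not present";
   gcol, bs and lcol are illustrative names.
*/
#if 0
   PetscInt gcol,bs,lcol;
#if defined (PETSC_USE_CTABLE)
   PetscTableFind(baij->colmap,gcol/bs + 1,&lcol);
   lcol = lcol - 1;                      /* -1 when the block column is absent */
#else
   lcol = baij->colmap[gcol/bs] - 1;
#endif
   if (lcol >= 0) lcol += gcol % bs;     /* point column within the block */
#endif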
128: #define CHUNKSIZE 10
130: #define MatSetValues_SeqBAIJ_A_Private(row,col,value,addv) \
131: { \
132: \
133: brow = row/bs; \
134: rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
135: rmax = aimax[brow]; nrow = ailen[brow]; \
136: bcol = col/bs; \
137: ridx = row % bs; cidx = col % bs; \
138: low = 0; high = nrow; \
139: while (high-low > 3) { \
140: t = (low+high)/2; \
141: if (rp[t] > bcol) high = t; \
142: else low = t; \
143: } \
144: for (_i=low; _i<high; _i++) { \
145: if (rp[_i] > bcol) break; \
146: if (rp[_i] == bcol) { \
147: bap = ap + bs2*_i + bs*cidx + ridx; \
148: if (addv == ADD_VALUES) *bap += value; \
149: else *bap = value; \
150: goto a_noinsert; \
151: } \
152: } \
153: if (a->nonew == 1) goto a_noinsert; \
154: if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
155: MatSeqXAIJReallocateAIJ(a,bs2,nrow,brow,bcol,rmax,aa,ai,aj,a->mbs,rp,ap,aimax,a->nonew); \
156: N = nrow++ - 1; \
157: /* shift up all the later entries in this row */ \
158: for (ii=N; ii>=_i; ii--) { \
159: rp[ii+1] = rp[ii]; \
160: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
161: } \
162: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); } \
163: rp[_i] = bcol; \
164: ap[bs2*_i + bs*cidx + ridx] = value; \
165: a_noinsert:; \
166: ailen[brow] = nrow; \
167: }
169: #define MatSetValues_SeqBAIJ_B_Private(row,col,value,addv) \
170: { \
171: brow = row/bs; \
172: rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
173: rmax = bimax[brow]; nrow = bilen[brow]; \
174: bcol = col/bs; \
175: ridx = row % bs; cidx = col % bs; \
176: low = 0; high = nrow; \
177: while (high-low > 3) { \
178: t = (low+high)/2; \
179: if (rp[t] > bcol) high = t; \
180: else low = t; \
181: } \
182: for (_i=low; _i<high; _i++) { \
183: if (rp[_i] > bcol) break; \
184: if (rp[_i] == bcol) { \
185: bap = ap + bs2*_i + bs*cidx + ridx; \
186: if (addv == ADD_VALUES) *bap += value; \
187: else *bap = value; \
188: goto b_noinsert; \
189: } \
190: } \
191: if (b->nonew == 1) goto b_noinsert; \
192: if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
193: MatSeqXAIJReallocateAIJ(b,bs2,nrow,brow,bcol,rmax,ba,bi,bj,b->mbs,rp,ap,bimax,b->nonew); \
194: CHKMEMQ;\
195: N = nrow++ - 1; \
196: /* shift up all the later entries in this row */ \
197: for (ii=N; ii>=_i; ii--) { \
198: rp[ii+1] = rp[ii]; \
199: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
200: } \
201: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));} \
202: rp[_i] = bcol; \
203: ap[bs2*_i + bs*cidx + ridx] = value; \
204: b_noinsert:; \
205: bilen[brow] = nrow; \
206: }
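/*
   Editorial sketch (not in the original source): the lookup strategy shared
   by the two macros above, isolated on a sorted block-column array rp[] of
   length nrow. A truncated binary search narrows the window to at most three
   entries, then a short linear scan finds the match or the insertion point;
   if bcol is absent, entries _i..nrow-1 are shifted up and a block inserted.
*/
#if 0
   low = 0; high = nrow;
   while (high-low > 3) {
     t = (low+high)/2;
     if (rp[t] > bcol) high = t;
     else              low  = t;
   }
   for (_i=low; _i<high; _i++) {
     if (rp[_i] > bcol) break;           /* insertion point is _i */
     if (rp[_i] == bcol) { /* found: add to or overwrite the bs2 block */ break; }
   }
#endif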
208: #if defined(PETSC_USE_MAT_SINGLE)
211: PetscErrorCode MatSetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
212: {
213: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
215: PetscInt i,N = m*n;
216: MatScalar *vsingle;
219: if (N > b->setvalueslen) {
220: if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
221: PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
222: b->setvalueslen = N;
223: }
224: vsingle = b->setvaluescopy;
226: for (i=0; i<N; i++) {
227: vsingle[i] = v[i];
228: }
229: MatSetValues_MPIBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
230: return(0);
231: }
235: PetscErrorCode MatSetValuesBlocked_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
236: {
237: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
239: PetscInt i,N = m*n*b->bs2;
240: MatScalar *vsingle;
243: if (N > b->setvalueslen) {
244: if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
245: PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
246: b->setvalueslen = N;
247: }
248: vsingle = b->setvaluescopy;
249: for (i=0; i<N; i++) {
250: vsingle[i] = v[i];
251: }
252: MatSetValuesBlocked_MPIBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
253: return(0);
254: }
258: PetscErrorCode MatSetValues_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
259: {
260: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
262: PetscInt i,N = m*n;
263: MatScalar *vsingle;
266: if (N > b->setvalueslen) {
267: if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
268: PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
269: b->setvalueslen = N;
270: }
271: vsingle = b->setvaluescopy;
272: for (i=0; i<N; i++) {
273: vsingle[i] = v[i];
274: }
275: MatSetValues_MPIBAIJ_HT_MatScalar(mat,m,im,n,in,vsingle,addv);
276: return(0);
277: }
281: PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
282: {
283: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
285: PetscInt i,N = m*n*b->bs2;
286: MatScalar *vsingle;
289: if (N > b->setvalueslen) {
290: if (b->setvaluescopy) {PetscFree(b->setvaluescopy);}
291: PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
292: b->setvalueslen = N;
293: }
294: vsingle = b->setvaluescopy;
295: for (i=0; i<N; i++) {
296: vsingle[i] = v[i];
297: }
298: MatSetValuesBlocked_MPIBAIJ_HT_MatScalar(mat,m,im,n,in,vsingle,addv);
299: return(0);
300: }
301: #endif
305: PetscErrorCode MatSetValues_MPIBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
306: {
307: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
308: MatScalar value;
309: PetscTruth roworiented = baij->roworiented;
311: PetscInt i,j,row,col;
312: PetscInt rstart_orig=baij->rstart_bs;
313: PetscInt rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
314: PetscInt cend_orig=baij->cend_bs,bs=mat->bs;
316: /* Some Variables required in the macro */
317: Mat A = baij->A;
318: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)(A)->data;
319: PetscInt *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
320: MatScalar *aa=a->a;
322: Mat B = baij->B;
323: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
324: PetscInt *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
325: MatScalar *ba=b->a;
327: PetscInt *rp,ii,nrow,_i,rmax,N,brow,bcol;
328: PetscInt low,high,t,ridx,cidx,bs2=a->bs2;
329: MatScalar *ap,*bap;
332: for (i=0; i<m; i++) {
333: if (im[i] < 0) continue;
334: #if defined(PETSC_USE_DEBUG)
335: if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->M-1);
336: #endif
337: if (im[i] >= rstart_orig && im[i] < rend_orig) {
338: row = im[i] - rstart_orig;
339: for (j=0; j<n; j++) {
340: if (in[j] >= cstart_orig && in[j] < cend_orig){
341: col = in[j] - cstart_orig;
342: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
343: MatSetValues_SeqBAIJ_A_Private(row,col,value,addv);
344: /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
345: } else if (in[j] < 0) continue;
346: #if defined(PETSC_USE_DEBUG)
347: else if (in[j] >= mat->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->N-1);}
348: #endif
349: else {
350: if (mat->was_assembled) {
351: if (!baij->colmap) {
352: CreateColmap_MPIBAIJ_Private(mat);
353: }
354: #if defined (PETSC_USE_CTABLE)
355: PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
356: col = col - 1;
357: #else
358: col = baij->colmap[in[j]/bs] - 1;
359: #endif
360: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
361: DisAssemble_MPIBAIJ(mat);
362: col = in[j];
363: /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
364: B = baij->B;
365: b = (Mat_SeqBAIJ*)(B)->data;
366: bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
367: ba=b->a;
368: } else col += in[j]%bs;
369: } else col = in[j];
370: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
371: MatSetValues_SeqBAIJ_B_Private(row,col,value,addv);
372: /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
373: }
374: }
375: } else {
376: if (!baij->donotstash) {
377: if (roworiented) {
378: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);
379: } else {
380: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);
381: }
382: }
383: }
384: }
385: return(0);
386: }
390: PetscErrorCode MatSetValuesBlocked_MPIBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
391: {
392: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
393: const MatScalar *value;
394: MatScalar *barray=baij->barray;
395: PetscTruth roworiented = baij->roworiented;
396: PetscErrorCode ierr;
397: PetscInt i,j,ii,jj,row,col,rstart=baij->rstart;
398: PetscInt rend=baij->rend,cstart=baij->cstart,stepval;
399: PetscInt cend=baij->cend,bs=mat->bs,bs2=baij->bs2;
400:
402: if(!barray) {
403: PetscMalloc(bs2*sizeof(MatScalar),&barray);
404: baij->barray = barray;
405: }
407: if (roworiented) {
408: stepval = (n-1)*bs;
409: } else {
410: stepval = (m-1)*bs;
411: }
412: for (i=0; i<m; i++) {
413: if (im[i] < 0) continue;
414: #if defined(PETSC_USE_DEBUG)
415: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
416: #endif
417: if (im[i] >= rstart && im[i] < rend) {
418: row = im[i] - rstart;
419: for (j=0; j<n; j++) {
420: /* If n == 1 (or m == 1 in column-oriented mode) a copy is not required */
421: if ((roworiented) && (n == 1)) {
422: barray = (MatScalar*)v + i*bs2;
423: } else if((!roworiented) && (m == 1)) {
424: barray = (MatScalar*)v + j*bs2;
425: } else { /* Here a copy is required */
426: if (roworiented) {
427: value = v + i*(stepval+bs)*bs + j*bs;
428: } else {
429: value = v + j*(stepval+bs)*bs + i*bs;
430: }
431: for (ii=0; ii<bs; ii++,value+=stepval) {
432: for (jj=0; jj<bs; jj++) {
433: *barray++ = *value++;
434: }
435: }
436: barray -=bs2;
437: }
438:
439: if (in[j] >= cstart && in[j] < cend){
440: col = in[j] - cstart;
441: MatSetValuesBlocked_SeqBAIJ_MatScalar(baij->A,1,&row,1,&col,barray,addv);
442: }
443: else if (in[j] < 0) continue;
444: #if defined(PETSC_USE_DEBUG)
445: else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
446: #endif
447: else {
448: if (mat->was_assembled) {
449: if (!baij->colmap) {
450: CreateColmap_MPIBAIJ_Private(mat);
451: }
453: #if defined(PETSC_USE_DEBUG)
454: #if defined (PETSC_USE_CTABLE)
455: { PetscInt data;
456: PetscTableFind(baij->colmap,in[j]+1,&data);
457: if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
458: }
459: #else
460: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
461: #endif
462: #endif
463: #if defined (PETSC_USE_CTABLE)
464: PetscTableFind(baij->colmap,in[j]+1,&col);
465: col = (col - 1)/bs;
466: #else
467: col = (baij->colmap[in[j]] - 1)/bs;
468: #endif
469: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
470: DisAssemble_MPIBAIJ(mat);
471: col = in[j];
472: }
473: }
474: else col = in[j];
475: MatSetValuesBlocked_SeqBAIJ_MatScalar(baij->B,1,&row,1,&col,barray,addv);
476: }
477: }
478: } else {
479: if (!baij->donotstash) {
480: if (roworiented) {
481: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
482: } else {
483: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
484: }
485: }
486: }
487: }
488: return(0);
489: }
491: #define HASH_KEY 0.6180339887
492: #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(PetscInt)((size)*(tmp-(PetscInt)tmp)))
493: /* #define HASH(size,key) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
494: /* #define HASH(size,key,tmp) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
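/*
   Editorial sketch (not in the original source): HASH() is multiplicative
   hashing with the golden-ratio constant -- scale the key, drop the integer
   part, and map the fraction onto [0,size). Worked example with size = 8,
   key = 11: tmp = 11*0.6180339887 = 6.798..., fraction = 0.798...,
   slot = (PetscInt)(8*0.798...) = 6.
*/
#if 0
   PetscReal tmp;
   PetscInt  h1 = HASH(8,11,tmp);   /* h1 == 6 for this size/key */
#endif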
497: PetscErrorCode MatSetValues_MPIBAIJ_HT_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
498: {
499: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
500: PetscTruth roworiented = baij->roworiented;
502: PetscInt i,j,row,col;
503: PetscInt rstart_orig=baij->rstart_bs;
504: PetscInt rend_orig=baij->rend_bs,Nbs=baij->Nbs;
505: PetscInt h1,key,size=baij->ht_size,bs=mat->bs,*HT=baij->ht,idx;
506: PetscReal tmp;
507: MatScalar **HD = baij->hd,value;
508: #if defined(PETSC_USE_DEBUG)
509: PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
510: #endif
514: for (i=0; i<m; i++) {
515: #if defined(PETSC_USE_DEBUG)
516: if (im[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
517: if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->M-1);
518: #endif
519: row = im[i];
520: if (row >= rstart_orig && row < rend_orig) {
521: for (j=0; j<n; j++) {
522: col = in[j];
523: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
524: /* Look up into the Hash Table */
525: key = (row/bs)*Nbs+(col/bs)+1;
526: h1 = HASH(size,key,tmp);
528:
529: idx = h1;
530: #if defined(PETSC_USE_DEBUG)
531: insert_ct++;
532: total_ct++;
533: if (HT[idx] != key) {
534: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
535: if (idx == size) {
536: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
537: if (idx == h1) {
538: SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
539: }
540: }
541: }
542: #else
543: if (HT[idx] != key) {
544: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
545: if (idx == size) {
546: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
547: if (idx == h1) {
548: SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
549: }
550: }
551: }
552: #endif
553: /* A HASH table entry is found, so insert the values at the correct address */
554: if (addv == ADD_VALUES) *(HD[idx]+ (col % bs)*bs + (row % bs)) += value;
555: else *(HD[idx]+ (col % bs)*bs + (row % bs)) = value;
556: }
557: } else {
558: if (!baij->donotstash) {
559: if (roworiented) {
560: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);
561: } else {
562: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);
563: }
564: }
565: }
566: }
567: #if defined(PETSC_USE_DEBUG)
568: baij->ht_total_ct = total_ct;
569: baij->ht_insert_ct = insert_ct;
570: #endif
571: return(0);
572: }
576: PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
577: {
578: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
579: PetscTruth roworiented = baij->roworiented;
580: PetscErrorCode ierr;
581: PetscInt i,j,ii,jj,row,col;
582: PetscInt rstart=baij->rstart ;
583: PetscInt rend=baij->rend,stepval,bs=mat->bs,bs2=baij->bs2;
584: PetscInt h1,key,size=baij->ht_size,idx,*HT=baij->ht,Nbs=baij->Nbs;
585: PetscReal tmp;
586: MatScalar **HD = baij->hd,*baij_a;
587: const MatScalar *v_t,*value;
588: #if defined(PETSC_USE_DEBUG)
589: PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
590: #endif
591:
594: if (roworiented) {
595: stepval = (n-1)*bs;
596: } else {
597: stepval = (m-1)*bs;
598: }
599: for (i=0; i<m; i++) {
600: #if defined(PETSC_USE_DEBUG)
601: if (im[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",im[i]);
602: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],baij->Mbs-1);
603: #endif
604: row = im[i];
605: v_t = v + i*bs2;
606: if (row >= rstart && row < rend) {
607: for (j=0; j<n; j++) {
608: col = in[j];
610: /* Look up into the Hash Table */
611: key = row*Nbs+col+1;
612: h1 = HASH(size,key,tmp);
613:
614: idx = h1;
615: #if defined(PETSC_USE_DEBUG)
616: total_ct++;
617: insert_ct++;
618: if (HT[idx] != key) {
619: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
620: if (idx == size) {
621: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
622: if (idx == h1) {
623: SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
624: }
625: }
626: }
627: #else
628: if (HT[idx] != key) {
629: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
630: if (idx == size) {
631: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
632: if (idx == h1) {
633: SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
634: }
635: }
636: }
637: #endif
638: baij_a = HD[idx];
639: if (roworiented) {
640: /*value = v + i*(stepval+bs)*bs + j*bs;*/
641: /* value = v + (i*(stepval+bs)+j)*bs; */
642: value = v_t;
643: v_t += bs;
644: if (addv == ADD_VALUES) {
645: for (ii=0; ii<bs; ii++,value+=stepval) {
646: for (jj=ii; jj<bs2; jj+=bs) {
647: baij_a[jj] += *value++;
648: }
649: }
650: } else {
651: for (ii=0; ii<bs; ii++,value+=stepval) {
652: for (jj=ii; jj<bs2; jj+=bs) {
653: baij_a[jj] = *value++;
654: }
655: }
656: }
657: } else {
658: value = v + j*(stepval+bs)*bs + i*bs;
659: if (addv == ADD_VALUES) {
660: for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
661: for (jj=0; jj<bs; jj++) {
662: baij_a[jj] += *value++;
663: }
664: }
665: } else {
666: for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
667: for (jj=0; jj<bs; jj++) {
668: baij_a[jj] = *value++;
669: }
670: }
671: }
672: }
673: }
674: } else {
675: if (!baij->donotstash) {
676: if (roworiented) {
677: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
678: } else {
679: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
680: }
681: }
682: }
683: }
684: #if defined(PETSC_USE_DEBUG)
685: baij->ht_total_ct = total_ct;
686: baij->ht_insert_ct = insert_ct;
687: #endif
688: return(0);
689: }
693: PetscErrorCode MatGetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
694: {
695: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
697: PetscInt bs=mat->bs,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
698: PetscInt bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
701: for (i=0; i<m; i++) {
702: if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);
703: if (idxm[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->M-1);
704: if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
705: row = idxm[i] - bsrstart;
706: for (j=0; j<n; j++) {
707: if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]);
708: if (idxn[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->N-1);
709: if (idxn[j] >= bscstart && idxn[j] < bscend){
710: col = idxn[j] - bscstart;
711: MatGetValues_SeqBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
712: } else {
713: if (!baij->colmap) {
714: CreateColmap_MPIBAIJ_Private(mat);
715: }
716: #if defined (PETSC_USE_CTABLE)
717: PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
718: data --;
719: #else
720: data = baij->colmap[idxn[j]/bs]-1;
721: #endif
722: if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
723: else {
724: col = data + idxn[j]%bs;
725: MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
726: }
727: }
728: }
729: } else {
730: SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
731: }
732: }
733: return(0);
734: }
738: PetscErrorCode MatNorm_MPIBAIJ(Mat mat,NormType type,PetscReal *nrm)
739: {
740: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
741: Mat_SeqBAIJ *amat = (Mat_SeqBAIJ*)baij->A->data,*bmat = (Mat_SeqBAIJ*)baij->B->data;
743: PetscInt i,j,bs2=baij->bs2,bs=baij->A->bs,nz,row,col;
744: PetscReal sum = 0.0;
745: MatScalar *v;
748: if (baij->size == 1) {
749: MatNorm(baij->A,type,nrm);
750: } else {
751: if (type == NORM_FROBENIUS) {
752: v = amat->a;
753: nz = amat->nz*bs2;
754: for (i=0; i<nz; i++) {
755: #if defined(PETSC_USE_COMPLEX)
756: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
757: #else
758: sum += (*v)*(*v); v++;
759: #endif
760: }
761: v = bmat->a;
762: nz = bmat->nz*bs2;
763: for (i=0; i<nz; i++) {
764: #if defined(PETSC_USE_COMPLEX)
765: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
766: #else
767: sum += (*v)*(*v); v++;
768: #endif
769: }
770: MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,mat->comm);
771: *nrm = sqrt(*nrm);
772: } else if (type == NORM_1) { /* max column sum */
773: PetscReal *tmp,*tmp2;
774: PetscInt *jj,*garray=baij->garray,cstart=baij->cstart;
775: PetscMalloc((2*mat->N+1)*sizeof(PetscReal),&tmp);
776: tmp2 = tmp + mat->N;
777: PetscMemzero(tmp,mat->N*sizeof(PetscReal));
778: v = amat->a; jj = amat->j;
779: for (i=0; i<amat->nz; i++) {
780: for (j=0; j<bs; j++){
781: col = bs*(cstart + *jj) + j; /* column index */
782: for (row=0; row<bs; row++){
783: tmp[col] += PetscAbsScalar(*v); v++;
784: }
785: }
786: jj++;
787: }
788: v = bmat->a; jj = bmat->j;
789: for (i=0; i<bmat->nz; i++) {
790: for (j=0; j<bs; j++){
791: col = bs*garray[*jj] + j;
792: for (row=0; row<bs; row++){
793: tmp[col] += PetscAbsScalar(*v); v++;
794: }
795: }
796: jj++;
797: }
798: MPI_Allreduce(tmp,tmp2,mat->N,MPIU_REAL,MPI_SUM,mat->comm);
799: *nrm = 0.0;
800: for (j=0; j<mat->N; j++) {
801: if (tmp2[j] > *nrm) *nrm = tmp2[j];
802: }
803: PetscFree(tmp);
804: } else if (type == NORM_INFINITY) { /* max row sum */
805: PetscReal *sums;
806: PetscMalloc(bs*sizeof(PetscReal),&sums);
807: sum = 0.0;
808: for (j=0; j<amat->mbs; j++) {
809: for (row=0; row<bs; row++) sums[row] = 0.0;
810: v = amat->a + bs2*amat->i[j];
811: nz = amat->i[j+1]-amat->i[j];
812: for (i=0; i<nz; i++) {
813: for (col=0; col<bs; col++){
814: for (row=0; row<bs; row++){
815: sums[row] += PetscAbsScalar(*v); v++;
816: }
817: }
818: }
819: v = bmat->a + bs2*bmat->i[j];
820: nz = bmat->i[j+1]-bmat->i[j];
821: for (i=0; i<nz; i++) {
822: for (col=0; col<bs; col++){
823: for (row=0; row<bs; row++){
824: sums[row] += PetscAbsScalar(*v); v++;
825: }
826: }
827: }
828: for (row=0; row<bs; row++){
829: if (sums[row] > sum) sum = sums[row];
830: }
831: }
832: MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_MAX,mat->comm);
833: PetscFree(sums);
834: } else {
835: SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
836: }
837: }
838: return(0);
839: }
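/*
   Editorial sketch (not in the original source): exercising the three norm
   types handled above on an assembled matrix.
*/
#if 0
   PetscReal nrm;
   MatNorm(mat,NORM_FROBENIUS,&nrm);
   MatNorm(mat,NORM_1,&nrm);            /* max column sum */
   MatNorm(mat,NORM_INFINITY,&nrm);     /* max row sum */
#endif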
841: /*
842: Creates the hash table, and fills it in.
843: This table is created only once.
844: If new entries need to be added to the matrix
845: then the hash table has to be destroyed and
846: recreated.
847: */
850: PetscErrorCode MatCreateHashTable_MPIBAIJ_Private(Mat mat,PetscReal factor)
851: {
852: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
853: Mat A = baij->A,B=baij->B;
854: Mat_SeqBAIJ *a=(Mat_SeqBAIJ *)A->data,*b=(Mat_SeqBAIJ *)B->data;
855: PetscInt i,j,k,nz=a->nz+b->nz,h1,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
857: PetscInt size,bs2=baij->bs2,rstart=baij->rstart;
858: PetscInt cstart=baij->cstart,*garray=baij->garray,row,col,Nbs=baij->Nbs;
859: PetscInt *HT,key;
860: MatScalar **HD;
861: PetscReal tmp;
862: #if defined(PETSC_USE_DEBUG)
863: PetscInt ct=0,max=0;
864: #endif
867: baij->ht_size=(PetscInt)(factor*nz);
868: size = baij->ht_size;
870: if (baij->ht) {
871: return(0);
872: }
873:
874: /* Allocate Memory for Hash Table */
875: PetscMalloc((size)*(sizeof(PetscInt)+sizeof(MatScalar*))+1,&baij->hd);
876: baij->ht = (PetscInt*)(baij->hd + size);
877: HD = baij->hd;
878: HT = baij->ht;
881: PetscMemzero(HD,size*(sizeof(PetscInt)+sizeof(MatScalar*)));
882:
884: /* Loop Over A */
885: for (i=0; i<a->mbs; i++) {
886: for (j=ai[i]; j<ai[i+1]; j++) {
887: row = i+rstart;
888: col = aj[j]+cstart;
889:
890: key = row*Nbs + col + 1;
891: h1 = HASH(size,key,tmp);
892: for (k=0; k<size; k++){
893: if (!HT[(h1+k)%size]) {
894: HT[(h1+k)%size] = key;
895: HD[(h1+k)%size] = a->a + j*bs2;
896: break;
897: #if defined(PETSC_USE_DEBUG)
898: } else {
899: ct++;
900: #endif
901: }
902: }
903: #if defined(PETSC_USE_DEBUG)
904: if (k> max) max = k;
905: #endif
906: }
907: }
908: /* Loop Over B */
909: for (i=0; i<b->mbs; i++) {
910: for (j=bi[i]; j<bi[i+1]; j++) {
911: row = i+rstart;
912: col = garray[bj[j]];
913: key = row*Nbs + col + 1;
914: h1 = HASH(size,key,tmp);
915: for (k=0; k<size; k++){
916: if (!HT[(h1+k)%size]) {
917: HT[(h1+k)%size] = key;
918: HD[(h1+k)%size] = b->a + j*bs2;
919: break;
920: #if defined(PETSC_USE_DEBUG)
921: } else {
922: ct++;
923: #endif
924: }
925: }
926: #if defined(PETSC_USE_DEBUG)
927: if (k> max) max = k;
928: #endif
929: }
930: }
931:
932: /* Print Summary */
933: #if defined(PETSC_USE_DEBUG)
934: for (i=0,j=0; i<size; i++) {
935: if (HT[i]) {j++;}
936: }
937: PetscLogInfo((0,"MatCreateHashTable_MPIBAIJ_Private: Average Search = %5.2f,max search = %D\n",(!j)? 0.0:((PetscReal)(ct+j))/j,max));
938: #endif
939: return(0);
940: }
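/*
   Editorial sketch (not in the original source): the table above is built
   only when the hash-table insertion path is requested. A user enables it
   before assembly; at MAT_FINAL_ASSEMBLY the table is created and the
   setvalues ops are switched to the _HT variants (see MatAssemblyEnd below).
*/
#if 0
   MatSetOption(mat,MAT_USE_HASH_TABLE);
#endif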
944: PetscErrorCode MatAssemblyBegin_MPIBAIJ(Mat mat,MatAssemblyType mode)
945: {
946: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
948: PetscInt nstash,reallocs;
949: InsertMode addv;
952: if (baij->donotstash) {
953: return(0);
954: }
956: /* make sure all processors are either in INSERTMODE or ADDMODE */
957: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);
958: if (addv == (ADD_VALUES|INSERT_VALUES)) {
959: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
960: }
961: mat->insertmode = addv; /* in case this processor had no cache */
963: MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);
964: MatStashScatterBegin_Private(&mat->bstash,baij->rowners);
965: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
966: PetscLogInfo((0,"MatAssemblyBegin_MPIBAIJ:Stash has %D entries,uses %D mallocs.\n",nstash,reallocs));
967: MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
968: PetscLogInfo((0,"MatAssemblyBegin_MPIBAIJ:Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs));
969: return(0);
970: }
974: PetscErrorCode MatAssemblyEnd_MPIBAIJ(Mat mat,MatAssemblyType mode)
975: {
976: Mat_MPIBAIJ *baij=(Mat_MPIBAIJ*)mat->data;
977: Mat_SeqBAIJ *a=(Mat_SeqBAIJ*)baij->A->data,*b=(Mat_SeqBAIJ*)baij->B->data;
979: PetscInt i,j,rstart,ncols,flg,bs2=baij->bs2;
980: PetscInt *row,*col,other_disassembled;
981: PetscTruth r1,r2,r3;
982: MatScalar *val;
983: InsertMode addv = mat->insertmode;
984: PetscMPIInt n;
987: if (!baij->donotstash) {
988: while (1) {
989: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
990: if (!flg) break;
992: for (i=0; i<n;) {
993: /* Now identify the consecutive vals belonging to the same row */
994: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
995: if (j < n) ncols = j-i;
996: else ncols = n-i;
997: /* Now assemble all these values with a single function call */
998: MatSetValues_MPIBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);
999: i = j;
1000: }
1001: }
1002: MatStashScatterEnd_Private(&mat->stash);
1003: /* Now process the block-stash. Since the values are stashed column-oriented,
1004: temporarily clear the roworiented flags, and restore the original flags
1005: after MatSetValues() */
1006: r1 = baij->roworiented;
1007: r2 = a->roworiented;
1008: r3 = b->roworiented;
1009: baij->roworiented = PETSC_FALSE;
1010: a->roworiented = PETSC_FALSE;
1011: b->roworiented = PETSC_FALSE;
1012: while (1) {
1013: MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
1014: if (!flg) break;
1015:
1016: for (i=0; i<n;) {
1017: /* Now identify the consecutive vals belonging to the same row */
1018: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
1019: if (j < n) ncols = j-i;
1020: else ncols = n-i;
1021: MatSetValuesBlocked_MPIBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
1022: i = j;
1023: }
1024: }
1025: MatStashScatterEnd_Private(&mat->bstash);
1026: baij->roworiented = r1;
1027: a->roworiented = r2;
1028: b->roworiented = r3;
1029: }
1030:
1031: MatAssemblyBegin(baij->A,mode);
1032: MatAssemblyEnd(baij->A,mode);
1034: /* determine if any processor has disassembled; if so we must
1035: also disassemble ourselves, in order that we may reassemble. */
1036: /*
1037: if the nonzero structure of submatrix B cannot change then we know that
1038: no processor disassembled, and thus we can skip this step
1039: */
1040: if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
1041: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);
1042: if (mat->was_assembled && !other_disassembled) {
1043: DisAssemble_MPIBAIJ(mat);
1044: }
1045: }
1047: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
1048: MatSetUpMultiply_MPIBAIJ(mat);
1049: }
1050: b->compressedrow.use = PETSC_TRUE;
1051: MatAssemblyBegin(baij->B,mode);
1052: MatAssemblyEnd(baij->B,mode);
1053:
1054: #if defined(PETSC_USE_DEBUG)
1055: if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
1056: PetscLogInfo((0,"MatAssemblyEnd_MPIBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct));
1057: baij->ht_total_ct = 0;
1058: baij->ht_insert_ct = 0;
1059: }
1060: #endif
1061: if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
1062: MatCreateHashTable_MPIBAIJ_Private(mat,baij->ht_fact);
1063: mat->ops->setvalues = MatSetValues_MPIBAIJ_HT;
1064: mat->ops->setvaluesblocked = MatSetValuesBlocked_MPIBAIJ_HT;
1065: }
1067: if (baij->rowvalues) {
1068: PetscFree(baij->rowvalues);
1069: baij->rowvalues = 0;
1070: }
1071: return(0);
1072: }
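/*
   Editorial sketch (not in the original source): the assembly sequence the
   two routines above implement. Values destined for rows owned by other
   processes are kept in the stash by MatSetValues() and scattered to their
   owners between MatAssemblyBegin() and MatAssemblyEnd(); indices below are
   illustrative.
*/
#if 0
   PetscInt    row = 0,col = 0;          /* may belong to another process */
   PetscScalar val = 1.0;
   MatSetValues(mat,1,&row,1,&col,&val,ADD_VALUES);
   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);  /* starts the stash scatter */
   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);    /* drains the stash, assembles A and B */
#endif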
1076: static PetscErrorCode MatView_MPIBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1077: {
1078: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1079: PetscErrorCode ierr;
1080: PetscMPIInt size = baij->size,rank = baij->rank;
1081: PetscInt bs = mat->bs;
1082: PetscTruth iascii,isdraw;
1083: PetscViewer sviewer;
1084: PetscViewerFormat format;
1087: /* printf(" MatView_MPIBAIJ_ASCIIorDraworSocket is called ...\n"); */
1088: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
1089: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
1090: if (iascii) {
1091: PetscViewerGetFormat(viewer,&format);
1092: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1093: MatInfo info;
1094: MPI_Comm_rank(mat->comm,&rank);
1095: MatGetInfo(mat,MAT_LOCAL,&info);
1096: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
1097: rank,mat->m,(PetscInt)info.nz_used*bs,(PetscInt)info.nz_allocated*bs,
1098: mat->bs,(PetscInt)info.memory);
1099: MatGetInfo(baij->A,MAT_LOCAL,&info);
1100: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
1101: MatGetInfo(baij->B,MAT_LOCAL,&info);
1102: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
1103: PetscViewerFlush(viewer);
1104: VecScatterView(baij->Mvctx,viewer);
1105: return(0);
1106: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1107: PetscViewerASCIIPrintf(viewer," block size is %D\n",bs);
1108: return(0);
1109: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1110: return(0);
1111: }
1112: }
1114: if (isdraw) {
1115: PetscDraw draw;
1116: PetscTruth isnull;
1117: PetscViewerDrawGetDraw(viewer,0,&draw);
1118: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
1119: }
1121: if (size == 1) {
1122: PetscObjectSetName((PetscObject)baij->A,mat->name);
1123: MatView(baij->A,viewer);
1124: } else {
1125: /* assemble the entire matrix onto the first processor. */
1126: Mat A;
1127: Mat_SeqBAIJ *Aloc;
1128: PetscInt M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
1129: MatScalar *a;
1131: /* Here we are creating a temporary matrix, so we will assume MPIBAIJ is acceptable */
1132: /* Perhaps this should be the type of mat? */
1133: MatCreate(mat->comm,&A);
1134: if (!rank) {
1135: MatSetSizes(A,M,N,M,N);
1136: } else {
1137: MatSetSizes(A,0,0,M,N);
1138: }
1139: MatSetType(A,MATMPIBAIJ);
1140: MatMPIBAIJSetPreallocation(A,mat->bs,0,PETSC_NULL,0,PETSC_NULL);
1141: PetscLogObjectParent(mat,A);
1143: /* copy over the A part */
1144: Aloc = (Mat_SeqBAIJ*)baij->A->data;
1145: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1146: PetscMalloc(bs*sizeof(PetscInt),&rvals);
1148: for (i=0; i<mbs; i++) {
1149: rvals[0] = bs*(baij->rstart + i);
1150: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1151: for (j=ai[i]; j<ai[i+1]; j++) {
1152: col = (baij->cstart+aj[j])*bs;
1153: for (k=0; k<bs; k++) {
1154: MatSetValues_MPIBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
1155: col++; a += bs;
1156: }
1157: }
1158: }
1159: /* copy over the B part */
1160: Aloc = (Mat_SeqBAIJ*)baij->B->data;
1161: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1162: for (i=0; i<mbs; i++) {
1163: rvals[0] = bs*(baij->rstart + i);
1164: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1165: for (j=ai[i]; j<ai[i+1]; j++) {
1166: col = baij->garray[aj[j]]*bs;
1167: for (k=0; k<bs; k++) {
1168: MatSetValues_MPIBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
1169: col++; a += bs;
1170: }
1171: }
1172: }
1173: PetscFree(rvals);
1174: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1175: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1176: /*
1177: Everyone has to call to draw the matrix since the graphics waits are
1178: synchronized across all processors that share the PetscDraw object
1179: */
1180: PetscViewerGetSingleton(viewer,&sviewer);
1181: if (!rank) {
1182: PetscObjectSetName((PetscObject)((Mat_MPIBAIJ*)(A->data))->A,mat->name);
1183: MatView(((Mat_MPIBAIJ*)(A->data))->A,sviewer);
1184: }
1185: PetscViewerRestoreSingleton(viewer,&sviewer);
1186: MatDestroy(A);
1187: }
1188: return(0);
1189: }
1193: PetscErrorCode MatView_MPIBAIJ(Mat mat,PetscViewer viewer)
1194: {
1196: PetscTruth iascii,isdraw,issocket,isbinary;
1199: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
1200: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
1201: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
1202: PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
1203: if (iascii || isdraw || issocket || isbinary) {
1204: MatView_MPIBAIJ_ASCIIorDraworSocket(mat,viewer);
1205: } else {
1206: SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPIBAIJ matrices",((PetscObject)viewer)->type_name);
1207: }
1208: return(0);
1209: }
1213: PetscErrorCode MatDestroy_MPIBAIJ(Mat mat)
1214: {
1215: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1219: #if defined(PETSC_USE_LOG)
1220: PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->M,mat->N);
1221: #endif
1222: MatStashDestroy_Private(&mat->stash);
1223: MatStashDestroy_Private(&mat->bstash);
1224: PetscFree(baij->rowners);
1225: MatDestroy(baij->A);
1226: MatDestroy(baij->B);
1227: #if defined (PETSC_USE_CTABLE)
1228: if (baij->colmap) {PetscTableDelete(baij->colmap);}
1229: #else
1230: if (baij->colmap) {PetscFree(baij->colmap);}
1231: #endif
1232: if (baij->garray) {PetscFree(baij->garray);}
1233: if (baij->lvec) {VecDestroy(baij->lvec);}
1234: if (baij->Mvctx) {VecScatterDestroy(baij->Mvctx);}
1235: if (baij->rowvalues) {PetscFree(baij->rowvalues);}
1236: if (baij->barray) {PetscFree(baij->barray);}
1237: if (baij->hd) {PetscFree(baij->hd);}
1238: #if defined(PETSC_USE_MAT_SINGLE)
1239: if (baij->setvaluescopy) {PetscFree(baij->setvaluescopy);}
1240: #endif
1241: PetscFree(baij);
1243: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
1244: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
1245: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
1246: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocation_C","",PETSC_NULL);
1247: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocationCSR_C","",PETSC_NULL);
1248: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C","",PETSC_NULL);
1249: PetscObjectComposeFunction((PetscObject)mat,"MatSetHashTableFactor_C","",PETSC_NULL);
1250: return(0);
1251: }
1255: PetscErrorCode MatMult_MPIBAIJ(Mat A,Vec xx,Vec yy)
1256: {
1257: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1259: PetscInt nt;
1262: VecGetLocalSize(xx,&nt);
1263: if (nt != A->n) {
1264: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
1265: }
1266: VecGetLocalSize(yy,&nt);
1267: if (nt != A->m) {
1268: SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
1269: }
1270: VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1271: (*a->A->ops->mult)(a->A,xx,yy);
1272: VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1273: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1274: VecScatterPostRecvs(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1275: return(0);
1276: }
1280: PetscErrorCode MatMultAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1281: {
1282: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1286: VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1287: (*a->A->ops->multadd)(a->A,xx,yy,zz);
1288: VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);
1289: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1290: return(0);
1291: }
1295: PetscErrorCode MatMultTranspose_MPIBAIJ(Mat A,Vec xx,Vec yy)
1296: {
1297: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1299: PetscTruth merged;
1302: VecScatterGetMerged(a->Mvctx,&merged);
1303: /* do nondiagonal part */
1304: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1305: if (!merged) {
1306: /* send it on its way */
1307: VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1308: /* do local part */
1309: (*a->A->ops->multtranspose)(a->A,xx,yy);
1310: /* receive remote parts: note this assumes the values are not actually */
1311: /* inserted in yy until the next line */
1312: VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1313: } else {
1314: /* do local part */
1315: (*a->A->ops->multtranspose)(a->A,xx,yy);
1316: /* send it on its way */
1317: VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1318: /* values were actually received in the Begin() but we still need to call this no-op */
1319: VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1320: }
1321: return(0);
1322: }
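/*
   Editorial sketch (not in the original source): both branches above compute
   the same product and differ only in how the local part is interleaved with
   the reverse scatter; callers simply use
*/
#if 0
   MatMultTranspose(A,xx,yy);   /* yy = A^T * xx */
#endif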
1326: PetscErrorCode MatMultTransposeAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1327: {
1328: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1332: /* do nondiagonal part */
1333: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1334: /* send it on its way */
1335: VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1336: /* do local part */
1337: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1338: /* receive remote parts: note this assumes the values are not actually */
1339: /* inserted in yy until the next line, which is true for my implementation*/
1340: /* but is not perhaps always true. */
1341: VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);
1342: return(0);
1343: }
1345: /*
1346: This only works correctly for square matrices where the subblock A->A is the
1347: diagonal block
1348: */
1351: PetscErrorCode MatGetDiagonal_MPIBAIJ(Mat A,Vec v)
1352: {
1353: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1357: if (A->M != A->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1358: MatGetDiagonal(a->A,v);
1359: return(0);
1360: }
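/*
   Editorial sketch (not in the original source): per the comment above, this
   is valid only for square matrices; the vector must match the row layout.
*/
#if 0
   Vec diag;
   VecCreateMPI(A->comm,A->m,PETSC_DETERMINE,&diag);  /* illustrative layout */
   MatGetDiagonal(A,diag);
   VecDestroy(diag);
#endif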
1364: PetscErrorCode MatScale_MPIBAIJ(Mat A,PetscScalar aa)
1365: {
1366: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1370: MatScale(a->A,aa);
1371: MatScale(a->B,aa);
1372: return(0);
1373: }
1377: PetscErrorCode MatGetRow_MPIBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1378: {
1379: Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)matin->data;
1380: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1382: PetscInt bs = matin->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
1383: PetscInt nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
1384: PetscInt *cmap,*idx_p,cstart = mat->cstart;
1387: if (mat->getrowactive) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1388: mat->getrowactive = PETSC_TRUE;
1390: if (!mat->rowvalues && (idx || v)) {
1391: /*
1392: allocate enough space to hold information from the longest row.
1393: */
1394: Mat_SeqBAIJ *Aa = (Mat_SeqBAIJ*)mat->A->data,*Ba = (Mat_SeqBAIJ*)mat->B->data;
1395: PetscInt max = 1,mbs = mat->mbs,tmp;
1396: for (i=0; i<mbs; i++) {
1397: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1398: if (max < tmp) { max = tmp; }
1399: }
1400: PetscMalloc(max*bs2*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);
1401: mat->rowindices = (PetscInt*)(mat->rowvalues + max*bs2);
1402: }
1403:
1404: if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
1405: lrow = row - brstart;
1407: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1408: if (!v) {pvA = 0; pvB = 0;}
1409: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1410: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1411: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1412: nztot = nzA + nzB;
1414: cmap = mat->garray;
1415: if (v || idx) {
1416: if (nztot) {
1417: /* Sort by increasing column numbers, assuming A and B already sorted */
1418: PetscInt imark = -1;
1419: if (v) {
1420: *v = v_p = mat->rowvalues;
1421: for (i=0; i<nzB; i++) {
1422: if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
1423: else break;
1424: }
1425: imark = i;
1426: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1427: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1428: }
1429: if (idx) {
1430: *idx = idx_p = mat->rowindices;
1431: if (imark > -1) {
1432: for (i=0; i<imark; i++) {
1433: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1434: }
1435: } else {
1436: for (i=0; i<nzB; i++) {
1437: if (cmap[cworkB[i]/bs] < cstart)
1438: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1439: else break;
1440: }
1441: imark = i;
1442: }
1443: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart*bs + cworkA[i];
1444: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1445: }
1446: } else {
1447: if (idx) *idx = 0;
1448: if (v) *v = 0;
1449: }
1450: }
1451: *nz = nztot;
1452: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1453: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1454: return(0);
1455: }
1459: PetscErrorCode MatRestoreRow_MPIBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1460: {
1461: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1464: if (!baij->getrowactive) {
1465: SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1466: }
1467: baij->getrowactive = PETSC_FALSE;
1468: return(0);
1469: }
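/*
   Editorial sketch (not in the original source): the required get/restore
   pairing for the two routines above; only locally owned rows may be queried,
   and at most one row may be active at a time (see the getrowactive guard).
*/
#if 0
   PetscInt    nz,*cols;
   PetscScalar *vals;
   MatGetRow(mat,row,&nz,&cols,&vals);
   /* ... examine the nz column indices and values ... */
   MatRestoreRow(mat,row,&nz,&cols,&vals);
#endif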
1473: PetscErrorCode MatZeroEntries_MPIBAIJ(Mat A)
1474: {
1475: Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1479: MatZeroEntries(l->A);
1480: MatZeroEntries(l->B);
1481: return(0);
1482: }
1486: PetscErrorCode MatGetInfo_MPIBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1487: {
1488: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)matin->data;
1489: Mat A = a->A,B = a->B;
1491: PetscReal isend[5],irecv[5];
1494: info->block_size = (PetscReal)matin->bs;
1495: MatGetInfo(A,MAT_LOCAL,info);
1496: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1497: isend[3] = info->memory; isend[4] = info->mallocs;
1498: MatGetInfo(B,MAT_LOCAL,info);
1499: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1500: isend[3] += info->memory; isend[4] += info->mallocs;
1501: if (flag == MAT_LOCAL) {
1502: info->nz_used = isend[0];
1503: info->nz_allocated = isend[1];
1504: info->nz_unneeded = isend[2];
1505: info->memory = isend[3];
1506: info->mallocs = isend[4];
1507: } else if (flag == MAT_GLOBAL_MAX) {
1508: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);
1509: info->nz_used = irecv[0];
1510: info->nz_allocated = irecv[1];
1511: info->nz_unneeded = irecv[2];
1512: info->memory = irecv[3];
1513: info->mallocs = irecv[4];
1514: } else if (flag == MAT_GLOBAL_SUM) {
1515: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);
1516: info->nz_used = irecv[0];
1517: info->nz_allocated = irecv[1];
1518: info->nz_unneeded = irecv[2];
1519: info->memory = irecv[3];
1520: info->mallocs = irecv[4];
1521: } else {
1522: SETERRQ1(PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1523: }
1524: info->rows_global = (PetscReal)A->M;
1525: info->columns_global = (PetscReal)A->N;
1526: info->rows_local = (PetscReal)A->m;
1527: info->columns_local = (PetscReal)A->N;
1528: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1529: info->fill_ratio_needed = 0;
1530: info->factor_mallocs = 0;
1531: return(0);
1532: }
1536: PetscErrorCode MatSetOption_MPIBAIJ(Mat A,MatOption op)
1537: {
1538: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1542: switch (op) {
1543: case MAT_NO_NEW_NONZERO_LOCATIONS:
1544: case MAT_YES_NEW_NONZERO_LOCATIONS:
1545: case MAT_COLUMNS_UNSORTED:
1546: case MAT_COLUMNS_SORTED:
1547: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1548: case MAT_KEEP_ZEROED_ROWS:
1549: case MAT_NEW_NONZERO_LOCATION_ERR:
1550: MatSetOption(a->A,op);
1551: MatSetOption(a->B,op);
1552: break;
1553: case MAT_ROW_ORIENTED:
1554: a->roworiented = PETSC_TRUE;
1555: MatSetOption(a->A,op);
1556: MatSetOption(a->B,op);
1557: break;
1558: case MAT_ROWS_SORTED:
1559: case MAT_ROWS_UNSORTED:
1560: case MAT_YES_NEW_DIAGONALS:
1561: PetscLogInfo((A,"Info:MatSetOption_MPIBAIJ:Option ignored\n"));
1562: break;
1563: case MAT_COLUMN_ORIENTED:
1564: a->roworiented = PETSC_FALSE;
1565: MatSetOption(a->A,op);
1566: MatSetOption(a->B,op);
1567: break;
1568: case MAT_IGNORE_OFF_PROC_ENTRIES:
1569: a->donotstash = PETSC_TRUE;
1570: break;
1571: case MAT_NO_NEW_DIAGONALS:
1572: SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1573: case MAT_USE_HASH_TABLE:
1574: a->ht_flag = PETSC_TRUE;
1575: break;
1576: case MAT_SYMMETRIC:
1577: case MAT_STRUCTURALLY_SYMMETRIC:
1578: case MAT_HERMITIAN:
1579: case MAT_SYMMETRY_ETERNAL:
1580: MatSetOption(a->A,op);
1581: break;
1582: case MAT_NOT_SYMMETRIC:
1583: case MAT_NOT_STRUCTURALLY_SYMMETRIC:
1584: case MAT_NOT_HERMITIAN:
1585: case MAT_NOT_SYMMETRY_ETERNAL:
1586: break;
1587: default:
1588: SETERRQ(PETSC_ERR_SUP,"unknown option");
1589: }
1590: return(0);
1591: }
1595: PetscErrorCode MatTranspose_MPIBAIJ(Mat A,Mat *matout)
1596: {
1597: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)A->data;
1598: Mat_SeqBAIJ *Aloc;
1599: Mat B;
1601: PetscInt M=A->M,N=A->N,*ai,*aj,i,*rvals,j,k,col;
1602: PetscInt bs=A->bs,mbs=baij->mbs;
1603: MatScalar *a;
1604:
1606: if (!matout && M != N) SETERRQ(PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1607: MatCreate(A->comm,&B);
1608: MatSetSizes(B,A->n,A->m,N,M);
1609: MatSetType(B,A->type_name);
1610: MatMPIBAIJSetPreallocation(B,A->bs,0,PETSC_NULL,0,PETSC_NULL);
1611:
1612: /* copy over the A part */
1613: Aloc = (Mat_SeqBAIJ*)baij->A->data;
1614: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1615: PetscMalloc(bs*sizeof(PetscInt),&rvals);
1616:
1617: for (i=0; i<mbs; i++) {
1618: rvals[0] = bs*(baij->rstart + i);
1619: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1620: for (j=ai[i]; j<ai[i+1]; j++) {
1621: col = (baij->cstart+aj[j])*bs;
1622: for (k=0; k<bs; k++) {
1623: MatSetValues_MPIBAIJ_MatScalar(B,1,&col,bs,rvals,a,INSERT_VALUES);
1624: col++; a += bs;
1625: }
1626: }
1627: }
1628: /* copy over the B part */
1629: Aloc = (Mat_SeqBAIJ*)baij->B->data;
1630: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1631: for (i=0; i<mbs; i++) {
1632: rvals[0] = bs*(baij->rstart + i);
1633: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1634: for (j=ai[i]; j<ai[i+1]; j++) {
1635: col = baij->garray[aj[j]]*bs;
1636: for (k=0; k<bs; k++) {
1637: MatSetValues_MPIBAIJ_MatScalar(B,1,&col,bs,rvals,a,INSERT_VALUES);
1638: col++; a += bs;
1639: }
1640: }
1641: }
1642: PetscFree(rvals);
1643: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1644: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1645:
1646: if (matout) {
1647: *matout = B;
1648: } else {
1649: MatHeaderCopy(A,B);
1650: }
1651: return(0);
1652: }
1656: PetscErrorCode MatDiagonalScale_MPIBAIJ(Mat mat,Vec ll,Vec rr)
1657: {
1658: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1659: Mat a = baij->A,b = baij->B;
1661: PetscInt s1,s2,s3;
1664: MatGetLocalSize(mat,&s2,&s3);
1665: if (rr) {
1666: VecGetLocalSize(rr,&s1);
1667: if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1668: /* Overlap communication with computation. */
1669: VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1670: }
1671: if (ll) {
1672: VecGetLocalSize(ll,&s1);
1673: if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1674: (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1675: }
1676: /* scale the diagonal block */
1677: (*a->ops->diagonalscale)(a,ll,rr);
1679: if (rr) {
1680: /* Do a scatter end and then right scale the off-diagonal block */
1681: VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);
1682: (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1683: }
1684:
1685: return(0);
1686: }
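/*
   Editorial sketch (not in the original source): computing
   mat = diag(ll)*mat*diag(rr); either vector may be PETSC_NULL to skip that
   side, as the guards above show.
*/
#if 0
   MatDiagonalScale(mat,ll,rr);
   MatDiagonalScale(mat,PETSC_NULL,rr);   /* right scaling only */
#endif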
1690: PetscErrorCode MatZeroRows_MPIBAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
1691: {
1692: Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1694: PetscMPIInt imdex,size = l->size,n,rank = l->rank;
1695: PetscInt i,*owners = l->rowners;
1696: PetscInt *nprocs,j,idx,nsends,row;
1697: PetscInt nmax,*svalues,*starts,*owner,nrecvs;
1698: PetscInt *rvalues,tag = A->tag,count,base,slen,*source,lastidx = -1;
1699: PetscInt *lens,*lrows,*values,bs=A->bs,rstart_bs=l->rstart_bs;
1700: MPI_Comm comm = A->comm;
1701: MPI_Request *send_waits,*recv_waits;
1702: MPI_Status recv_status,*send_status;
1703: #if defined(PETSC_USE_DEBUG)
1704: PetscTruth found = PETSC_FALSE;
1705: #endif
1706:
1708: /* first count number of contributors to each processor */
1709: PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
1710: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
1711: PetscMalloc((N+1)*sizeof(PetscInt),&owner); /* see note*/
1712: j = 0;
1713: for (i=0; i<N; i++) {
1714: if (lastidx > (idx = rows[i])) j = 0;
1715: lastidx = idx;
1716: for (; j<size; j++) {
1717: if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1718: nprocs[2*j]++;
1719: nprocs[2*j+1] = 1;
1720: owner[i] = j;
1721: #if defined(PETSC_USE_DEBUG)
1722: found = PETSC_TRUE;
1723: #endif
1724: break;
1725: }
1726: }
1727: #if defined(PETSC_USE_DEBUG)
1728: if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1729: found = PETSC_FALSE;
1730: #endif
1731: }
1732: nsends = 0; for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
1733:
1734: /* inform other processors of number of messages and max length*/
1735: PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
1736:
1737: /* post receives: */
1738: PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);
1739: PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
1740: for (i=0; i<nrecvs; i++) {
1741: MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
1742: }
1743:
1744: /* do sends:
1745: 1) starts[i] gives the starting index in svalues for stuff going to
1746: the ith processor
1747: */
1748: PetscMalloc((N+1)*sizeof(PetscInt),&svalues);
1749: PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
1750: PetscMalloc((size+1)*sizeof(PetscInt),&starts);
1751: starts[0] = 0;
1752: for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1753: for (i=0; i<N; i++) {
1754: svalues[starts[owner[i]]++] = rows[i];
1755: }
1756:
1757: starts[0] = 0;
1758: for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1759: count = 0;
1760: for (i=0; i<size; i++) {
1761: if (nprocs[2*i+1]) {
1762: MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
1763: }
1764: }
1765: PetscFree(starts);
1767: base = owners[rank]*bs;
1768:
1769: /* wait on receives */
1770: PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);
1771: source = lens + nrecvs;
1772: count = nrecvs; slen = 0;
1773: while (count) {
1774: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
1775: /* unpack receives into our local space */
1776: MPI_Get_count(&recv_status,MPIU_INT,&n);
1777: source[imdex] = recv_status.MPI_SOURCE;
1778: lens[imdex] = n;
1779: slen += n;
1780: count--;
1781: }
1782: PetscFree(recv_waits);
1783:
1784: /* move the data into the send scatter */
1785: PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);
1786: count = 0;
1787: for (i=0; i<nrecvs; i++) {
1788: values = rvalues + i*nmax;
1789: for (j=0; j<lens[i]; j++) {
1790: lrows[count++] = values[j] - base;
1791: }
1792: }
1793: PetscFree(rvalues);
1794: PetscFree(lens);
1795: PetscFree(owner);
1796: PetscFree(nprocs);
1797:
1798: /* actually zap the local rows */
1799: /*
1800: Zero the required rows. If the "diagonal block" of the matrix
1801: is square and the user wishes to set the diagonal we use separate
1802: code so that MatSetValues() is not called for each diagonal entry,
1803: which would allocate new memory and slow things down with many mallocs.
1805: Contributed by: Matthew Knepley
1806: */
1807: /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1808: MatZeroRows_SeqBAIJ(l->B,slen,lrows,0.0);
1809: if ((diag != 0.0) && (l->A->M == l->A->N)) {
1810: MatZeroRows_SeqBAIJ(l->A,slen,lrows,diag);
1811: } else if (diag != 0.0) {
1812: MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0);
1813: if (((Mat_SeqBAIJ*)l->A->data)->nonew) {
1814: SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1815: MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1816: }
1817: for (i=0; i<slen; i++) {
1818: row = lrows[i] + rstart_bs;
1819: MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);
1820: }
1821: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1822: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1823: } else {
1824: MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0);
1825: }
1827: PetscFree(lrows);
1829: /* wait on sends */
1830: if (nsends) {
1831: PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
1832: MPI_Waitall(nsends,send_waits,send_status);
1833: PetscFree(send_status);
1834: }
1835: PetscFree(send_waits);
1836: PetscFree(svalues);
1838: return(0);
1839: }
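/*
   A minimal usage sketch (illustrative, not part of the original source):
   zeroing two rows of an assembled MPIBAIJ matrix through the public
   MatZeroRows() interface, placing 1.0 on the diagonal of those rows.
   The row indices are global and chosen arbitrarily here.

.vb
      PetscInt rows[2] = {0,5};
      MatZeroRows(A,2,rows,1.0);
.ve
*/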
1843: PetscErrorCode MatPrintHelp_MPIBAIJ(Mat A)
1844: {
1845: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1846: MPI_Comm comm = A->comm;
1847: static PetscTruth called = PETSC_FALSE;
1848: PetscErrorCode ierr;
1851: if (!a->rank) {
1852: MatPrintHelp_SeqBAIJ(a->A);
1853: }
1854: if (called) {return(0);} else called = PETSC_TRUE;
1855: (*PetscHelpPrintf)(comm," Options for MATMPIBAIJ matrix format (the defaults):\n");
1856: (*PetscHelpPrintf)(comm," -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");
1857: return(0);
1858: }
1862: PetscErrorCode MatSetUnfactored_MPIBAIJ(Mat A)
1863: {
1864: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1868: MatSetUnfactored(a->A);
1869: return(0);
1870: }
1872: static PetscErrorCode MatDuplicate_MPIBAIJ(Mat,MatDuplicateOption,Mat *);
1876: PetscErrorCode MatEqual_MPIBAIJ(Mat A,Mat B,PetscTruth *flag)
1877: {
1878: Mat_MPIBAIJ *matB = (Mat_MPIBAIJ*)B->data,*matA = (Mat_MPIBAIJ*)A->data;
1879: Mat a,b,c,d;
1880: PetscTruth flg;
1884: a = matA->A; b = matA->B;
1885: c = matB->A; d = matB->B;
1887: MatEqual(a,c,&flg);
1888: if (flg) {
1889: MatEqual(b,d,&flg);
1890: }
1891: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);
1892: return(0);
1893: }
1898: PetscErrorCode MatSetUpPreallocation_MPIBAIJ(Mat A)
1899: {
1903: MatMPIBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1904: return(0);
1905: }
1907: /* -------------------------------------------------------------------*/
1908: static struct _MatOps MatOps_Values = {
1909: MatSetValues_MPIBAIJ,
1910: MatGetRow_MPIBAIJ,
1911: MatRestoreRow_MPIBAIJ,
1912: MatMult_MPIBAIJ,
1913: /* 4*/ MatMultAdd_MPIBAIJ,
1914: MatMultTranspose_MPIBAIJ,
1915: MatMultTransposeAdd_MPIBAIJ,
1916: 0,
1917: 0,
1918: 0,
1919: /*10*/ 0,
1920: 0,
1921: 0,
1922: 0,
1923: MatTranspose_MPIBAIJ,
1924: /*15*/ MatGetInfo_MPIBAIJ,
1925: MatEqual_MPIBAIJ,
1926: MatGetDiagonal_MPIBAIJ,
1927: MatDiagonalScale_MPIBAIJ,
1928: MatNorm_MPIBAIJ,
1929: /*20*/ MatAssemblyBegin_MPIBAIJ,
1930: MatAssemblyEnd_MPIBAIJ,
1931: 0,
1932: MatSetOption_MPIBAIJ,
1933: MatZeroEntries_MPIBAIJ,
1934: /*25*/ MatZeroRows_MPIBAIJ,
1935: 0,
1936: 0,
1937: 0,
1938: 0,
1939: /*30*/ MatSetUpPreallocation_MPIBAIJ,
1940: 0,
1941: 0,
1942: 0,
1943: 0,
1944: /*35*/ MatDuplicate_MPIBAIJ,
1945: 0,
1946: 0,
1947: 0,
1948: 0,
1949: /*40*/ 0,
1950: MatGetSubMatrices_MPIBAIJ,
1951: MatIncreaseOverlap_MPIBAIJ,
1952: MatGetValues_MPIBAIJ,
1953: 0,
1954: /*45*/ MatPrintHelp_MPIBAIJ,
1955: MatScale_MPIBAIJ,
1956: 0,
1957: 0,
1958: 0,
1959: /*50*/ 0,
1960: 0,
1961: 0,
1962: 0,
1963: 0,
1964: /*55*/ 0,
1965: 0,
1966: MatSetUnfactored_MPIBAIJ,
1967: 0,
1968: MatSetValuesBlocked_MPIBAIJ,
1969: /*60*/ 0,
1970: MatDestroy_MPIBAIJ,
1971: MatView_MPIBAIJ,
1972: MatGetPetscMaps_Petsc,
1973: 0,
1974: /*65*/ 0,
1975: 0,
1976: 0,
1977: 0,
1978: 0,
1979: /*70*/ MatGetRowMax_MPIBAIJ,
1980: 0,
1981: 0,
1982: 0,
1983: 0,
1984: /*75*/ 0,
1985: 0,
1986: 0,
1987: 0,
1988: 0,
1989: /*80*/ 0,
1990: 0,
1991: 0,
1992: 0,
1993: MatLoad_MPIBAIJ,
1994: /*85*/ 0,
1995: 0,
1996: 0,
1997: 0,
1998: 0,
1999: /*90*/ 0,
2000: 0,
2001: 0,
2002: 0,
2003: 0,
2004: /*95*/ 0,
2005: 0,
2006: 0,
2007: 0};
2013: PetscErrorCode PETSCMAT_DLLEXPORT MatGetDiagonalBlock_MPIBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
2014: {
2016: *a = ((Mat_MPIBAIJ *)A->data)->A;
2017: *iscopy = PETSC_FALSE;
2018: return(0);
2019: }
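/*
   Usage sketch (assuming the public MatGetDiagonalBlock() wrapper with this
   signature; not from the original source): fetching the local "diagonal"
   SeqBAIJ block. Since iscopy comes back PETSC_FALSE here, the caller must
   not destroy the returned matrix.

.vb
      Mat        Ad;
      PetscTruth iscopy;
      MatGetDiagonalBlock(A,&iscopy,MAT_INITIAL_MATRIX,&Ad);
.ve
*/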
2028: PetscErrorCode MatMPIBAIJSetPreallocationCSR_MPIBAIJ(Mat B,PetscInt bs,const PetscInt I[],const PetscInt J[],const PetscScalar v[])
2029: {
2030: Mat_MPIBAIJ *b = (Mat_MPIBAIJ *)B->data;
2031: PetscInt m = B->m/bs,cstart = b->cstart, cend = b->cend,j,nnz,i,d;
2032: PetscInt *d_nnz,*o_nnz,nnz_max = 0,rstart = b->rstart,ii;
2033: const PetscInt *JJ;
2034: PetscScalar *values;
2038: #if defined(PETSC_OPT_g)
2039: if (I[0]) SETERRQ1(PETSC_ERR_ARG_RANGE,"I[0] must be 0, it is %D",I[0]);
2040: #endif
2041: PetscMalloc((2*m+1)*sizeof(PetscInt),&d_nnz);
2042: o_nnz = d_nnz + m;
2044: for (i=0; i<m; i++) {
2045: nnz = I[i+1]- I[i];
2046: JJ = J + I[i];
2047: nnz_max = PetscMax(nnz_max,nnz);
2048: #if defined(PETSC_OPT_g)
2049: if (nnz < 0) SETERRQ2(PETSC_ERR_ARG_RANGE,"Local row %D has a negative number of columns %D",i,nnz);
2050: #endif
2051: for (j=0; j<nnz; j++) {
2052: if (*JJ >= cstart) break;
2053: JJ++;
2054: }
2055: d = 0;
2056: for (; j<nnz; j++) {
2057: if (*JJ++ >= cend) break;
2058: d++;
2059: }
2060: d_nnz[i] = d;
2061: o_nnz[i] = nnz - d;
2062: }
2063: MatMPIBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
2064: PetscFree(d_nnz);
2066: if (v) values = (PetscScalar*)v;
2067: else {
2068: PetscMalloc(bs*bs*(nnz_max+1)*sizeof(PetscScalar),&values);
2069: PetscMemzero(values,bs*bs*nnz_max*sizeof(PetscScalar));
2070: }
2072: MatSetOption(B,MAT_COLUMNS_SORTED);
2073: for (i=0; i<m; i++) {
2074: ii = i + rstart;
2075: nnz = I[i+1]- I[i];
2076: MatSetValuesBlocked_MPIBAIJ(B,1,&ii,nnz,J+I[i],values,INSERT_VALUES);
2077: }
2078: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2079: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2080: MatSetOption(B,MAT_COLUMNS_UNSORTED);
2082: if (!v) {
2083: PetscFree(values);
2084: }
2085: return(0);
2086: }
2090: /*@C
2091: MatMPIBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in block AIJ format
2092: (block compressed row).
2094: Collective on Mat
2096: Input Parameters:
2097: + B - the matrix
2098: . bs - the block size
2099: . i - the indices into j for the start of each local row (starts with zero)
2100: . j - the column indices for each local row (starts with zero); these must be sorted for each row
2101: - v - optional values in the matrix
2102: Level: developer
2104: .keywords: matrix, block, aij, compressed row, sparse, parallel
2106: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateMPIBAIJ(), MATMPIBAIJ
2107: @*/
2108: PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
2109: {
2110: PetscErrorCode ierr,(*f)(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]);
2113: PetscObjectQueryFunction((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",(void (**)(void))&f);
2114: if (f) {
2115: (*f)(B,bs,i,j,v);
2116: }
2117: return(0);
2118: }
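/*
   A minimal sketch of calling the routine above (values are illustrative,
   not from the original source): two local block rows with bs = 1; row 0
   holds blocks in global block columns 0 and 1, row 1 holds one block in
   column 1, and the column indices are sorted within each row as required.

.vb
      PetscInt    i[3] = {0,2,3};
      PetscInt    j[3] = {0,1,1};
      PetscScalar v[3] = {1.0,2.0,3.0};
      MatMPIBAIJSetPreallocationCSR(B,1,i,j,v);
.ve
*/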
2123: PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetPreallocation_MPIBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
2124: {
2125: Mat_MPIBAIJ *b;
2127: PetscInt i;
2130: B->preallocated = PETSC_TRUE;
2131: PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);
2133: if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
2134: if (d_nz == PETSC_DEFAULT || d_nz == PETSC_DECIDE) d_nz = 5;
2135: if (o_nz == PETSC_DEFAULT || o_nz == PETSC_DECIDE) o_nz = 2;
2136: if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
2137: if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
2138: if (d_nnz) {
2139: for (i=0; i<B->m/bs; i++) {
2140: if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
2141: }
2142: }
2143: if (o_nnz) {
2144: for (i=0; i<B->m/bs; i++) {
2145: if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
2146: }
2147: }
2148:
2149: PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);
2150: PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);
2151: PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);
2152: PetscMapCreateMPI(B->comm,B->n,B->N,&B->cmap);
2154: b = (Mat_MPIBAIJ*)B->data;
2155: B->bs = bs;
2156: b->bs2 = bs*bs;
2157: b->mbs = B->m/bs;
2158: b->nbs = B->n/bs;
2159: b->Mbs = B->M/bs;
2160: b->Nbs = B->N/bs;
2162: MPI_Allgather(&b->mbs,1,MPIU_INT,b->rowners+1,1,MPIU_INT,B->comm);
2163: b->rowners[0] = 0;
2164: for (i=2; i<=b->size; i++) {
2165: b->rowners[i] += b->rowners[i-1];
2166: }
2167: b->rstart = b->rowners[b->rank];
2168: b->rend = b->rowners[b->rank+1];
2170: MPI_Allgather(&b->nbs,1,MPIU_INT,b->cowners+1,1,MPIU_INT,B->comm);
2171: b->cowners[0] = 0;
2172: for (i=2; i<=b->size; i++) {
2173: b->cowners[i] += b->cowners[i-1];
2174: }
2175: b->cstart = b->cowners[b->rank];
2176: b->cend = b->cowners[b->rank+1];
2178: for (i=0; i<=b->size; i++) {
2179: b->rowners_bs[i] = b->rowners[i]*bs;
2180: }
2181: b->rstart_bs = b->rstart*bs;
2182: b->rend_bs = b->rend*bs;
2183: b->cstart_bs = b->cstart*bs;
2184: b->cend_bs = b->cend*bs;
2186: MatCreate(PETSC_COMM_SELF,&b->A);
2187: MatSetSizes(b->A,B->m,B->n,B->m,B->n);
2188: MatSetType(b->A,MATSEQBAIJ);
2189: MatSeqBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
2190: PetscLogObjectParent(B,b->A);
2191: MatCreate(PETSC_COMM_SELF,&b->B);
2192: MatSetSizes(b->B,B->m,B->N,B->m,B->N);
2193: MatSetType(b->B,MATSEQBAIJ);
2194: MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
2195: PetscLogObjectParent(B,b->B);
2197: MatStashCreate_Private(B->comm,bs,&B->bstash);
2199: return(0);
2200: }
2204: EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatDiagonalScaleLocal_MPIBAIJ(Mat,Vec);
2205: EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatSetHashTableFactor_MPIBAIJ(Mat,PetscReal);
2208: /*MC
2209: MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices.
2211: Options Database Keys:
2212: . -mat_type mpibaij - sets the matrix type to "mpibaij" during a call to MatSetFromOptions()
2214: Level: beginner
2216: .seealso: MatCreateMPIBAIJ()
2217: M*/
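/*
   Illustrative sketch (not from the original source): creating a matrix so
   that the option -mat_type mpibaij selects this format at runtime; m,n,M,N
   are assumed to be set by the caller.

.vb
      MatCreate(comm,&A);
      MatSetSizes(A,m,n,M,N);
      MatSetFromOptions(A);
.ve
*/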
2222: PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPIBAIJ(Mat B)
2223: {
2224: Mat_MPIBAIJ *b;
2226: PetscTruth flg;
2229: PetscNew(Mat_MPIBAIJ,&b);
2230: B->data = (void*)b;
2233: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
2234: B->mapping = 0;
2235: B->factor = 0;
2236: B->assembled = PETSC_FALSE;
2238: B->insertmode = NOT_SET_VALUES;
2239: MPI_Comm_rank(B->comm,&b->rank);
2240: MPI_Comm_size(B->comm,&b->size);
2242: /* build local table of row and column ownerships */
2243: PetscMalloc(3*(b->size+2)*sizeof(PetscInt),&b->rowners);
2244: PetscLogObjectMemory(B,3*(b->size+2)*sizeof(PetscInt)+sizeof(struct _p_Mat)+sizeof(Mat_MPIBAIJ));
2245: b->cowners = b->rowners + b->size + 2;
2246: b->rowners_bs = b->cowners + b->size + 2;
2248: /* build cache for off array entries formed */
2249: MatStashCreate_Private(B->comm,1,&B->stash);
2250: b->donotstash = PETSC_FALSE;
2251: b->colmap = PETSC_NULL;
2252: b->garray = PETSC_NULL;
2253: b->roworiented = PETSC_TRUE;
2255: #if defined(PETSC_USE_MAT_SINGLE)
2256: /* stuff for MatSetValues_XXX in single precision */
2257: b->setvalueslen = 0;
2258: b->setvaluescopy = PETSC_NULL;
2259: #endif
2261: /* stuff used in block assembly */
2262: b->barray = 0;
2264: /* stuff used for matrix vector multiply */
2265: b->lvec = 0;
2266: b->Mvctx = 0;
2268: /* stuff for MatGetRow() */
2269: b->rowindices = 0;
2270: b->rowvalues = 0;
2271: b->getrowactive = PETSC_FALSE;
2273: /* hash table stuff */
2274: b->ht = 0;
2275: b->hd = 0;
2276: b->ht_size = 0;
2277: b->ht_flag = PETSC_FALSE;
2278: b->ht_fact = 0;
2279: b->ht_total_ct = 0;
2280: b->ht_insert_ct = 0;
2282: PetscOptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);
2283: if (flg) {
2284: PetscReal fact = 1.39;
2285: MatSetOption(B,MAT_USE_HASH_TABLE);
2286: PetscOptionsGetReal(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);
2287: if (fact <= 1.0) fact = 1.39;
2288: MatMPIBAIJSetHashTableFactor(B,fact);
2289: PetscLogInfo((0,"MatCreateMPIBAIJ:Hash table Factor used %5.2f\n",fact));
2290: }
2291: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
2292: "MatStoreValues_MPIBAIJ",
2293: MatStoreValues_MPIBAIJ);
2294: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
2295: "MatRetrieveValues_MPIBAIJ",
2296: MatRetrieveValues_MPIBAIJ);
2297: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
2298: "MatGetDiagonalBlock_MPIBAIJ",
2299: MatGetDiagonalBlock_MPIBAIJ);
2300: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocation_C",
2301: "MatMPIBAIJSetPreallocation_MPIBAIJ",
2302: MatMPIBAIJSetPreallocation_MPIBAIJ);
2303: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",
2304: "MatMPIBAIJSetPreallocationCSR_MPIAIJ",
2305: MatMPIBAIJSetPreallocationCSR_MPIBAIJ);
2306: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
2307: "MatDiagonalScaleLocal_MPIBAIJ",
2308: MatDiagonalScaleLocal_MPIBAIJ);
2309: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatSetHashTableFactor_C",
2310: "MatSetHashTableFactor_MPIBAIJ",
2311: MatSetHashTableFactor_MPIBAIJ);
2312: return(0);
2313: }
2316: /*MC
2317: MATBAIJ - MATBAIJ = "baij" - A matrix type to be used for block sparse matrices.
2319: This matrix type is identical to MATSEQBAIJ when constructed with a single process communicator,
2320: and MATMPIBAIJ otherwise.
2322: Options Database Keys:
2323: . -mat_type baij - sets the matrix type to "baij" during a call to MatSetFromOptions()
2325: Level: beginner
2327: .seealso: MatCreateMPIBAIJ(),MATSEQBAIJ,MATMPIBAIJ, MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
2328: M*/
2333: PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_BAIJ(Mat A)
2334: {
2336: PetscMPIInt size;
2339: PetscObjectChangeTypeName((PetscObject)A,MATBAIJ);
2340: MPI_Comm_size(A->comm,&size);
2341: if (size == 1) {
2342: MatSetType(A,MATSEQBAIJ);
2343: } else {
2344: MatSetType(A,MATMPIBAIJ);
2345: }
2346: return(0);
2347: }
2352: /*@C
2353: MatMPIBAIJSetPreallocation - Allocates memory for a sparse parallel matrix in block AIJ format
2354: (block compressed row). For good matrix assembly performance
2355: the user should preallocate the matrix storage by setting the parameters
2356: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
2357: performance can be increased by more than a factor of 50.
2359: Collective on Mat
2361: Input Parameters:
2362: + A - the matrix
2363: . bs - size of block
2364: . d_nz - number of block nonzeros per block row in diagonal portion of local
2365: submatrix (same for all local rows)
2366: . d_nnz - array containing the number of block nonzeros in the various block rows
2367: in the diagonal portion of the local submatrix (possibly different for each block
2368: row) or PETSC_NULL. You must leave room for the diagonal entry even if it is zero.
2369: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
2370: submatrix (same for all local rows).
2371: - o_nnz - array containing the number of nonzeros in the various block rows of the
2372: off-diagonal portion of the local submatrix (possibly different for
2373: each block row) or PETSC_NULL.
2375: If the *_nnz parameter is given then the *_nz parameter is ignored
2377: Options Database Keys:
2378: . -mat_no_unroll - uses code that does not unroll the loops in the
2379: block calculations (much slower)
2380: . -mat_block_size - size of the blocks to use
2382: Notes:
2383: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2384: then it must be used on all processors that share the object for that argument.
2386: Storage Information:
2387: For a square global matrix we define each processor's diagonal portion
2388: to be its local rows and the corresponding columns (a square submatrix);
2389: each processor's off-diagonal portion encompasses the remainder of the
2390: local matrix (a rectangular submatrix).
2392: The user can specify preallocated storage for the diagonal part of
2393: the local submatrix with either d_nz or d_nnz (not both). Set
2394: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
2395: memory allocation. Likewise, specify preallocated storage for the
2396: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
2398: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
2399: the figure below we depict these three local rows and all columns (0-11).
2401: .vb
2402: 0 1 2 3 4 5 6 7 8 9 10 11
2403: -------------------
2404: row 3 | o o o d d d o o o o o o
2405: row 4 | o o o d d d o o o o o o
2406: row 5 | o o o d d d o o o o o o
2407: -------------------
2408: .ve
2409:
2410: Thus, any entries in the d locations are stored in the d (diagonal)
2411: submatrix, and any entries in the o locations are stored in the
2412: o (off-diagonal) submatrix. Note that the d and the o submatrices are
2413: stored simply in the MATSEQBAIJ format for compressed row storage.
2415: Now d_nz should indicate the number of block nonzeros per row in the d matrix,
2416: and o_nz should indicate the number of block nonzeros per row in the o matrix.
2417: In general, for PDE problems in which most nonzeros are near the diagonal,
2418: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
2419: or you will get TERRIBLE performance; see the users' manual chapter on
2420: matrices.
2422: Level: intermediate
2424: .keywords: matrix, block, aij, compressed row, sparse, parallel
2426: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateMPIBAIJ(), MatMPIBAIJSetPreallocationCSR()
2427: @*/
2428: PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2429: {
2430: PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
2433: PetscObjectQueryFunction((PetscObject)B,"MatMPIBAIJSetPreallocation_C",(void (**)(void))&f);
2434: if (f) {
2435: (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
2436: }
2437: return(0);
2438: }
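/*
   A minimal sketch of the preallocation call above (sizes are illustrative,
   not from the original source): a matrix of 3x3 blocks with a uniform
   estimate of 5 diagonal and 2 off-diagonal block nonzeros per block row.

.vb
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATMPIBAIJ);
      MatMPIBAIJSetPreallocation(A,3,5,PETSC_NULL,2,PETSC_NULL);
.ve
*/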
2442: /*@C
2443: MatCreateMPIBAIJ - Creates a sparse parallel matrix in block AIJ format
2444: (block compressed row). For good matrix assembly performance
2445: the user should preallocate the matrix storage by setting the parameters
2446: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
2447: performance can be increased by more than a factor of 50.
2449: Collective on MPI_Comm
2451: Input Parameters:
2452: + comm - MPI communicator
2453: . bs - size of block
2454: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
2455: This value should be the same as the local size used in creating the
2456: y vector for the matrix-vector product y = Ax.
2457: . n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
2458: This value should be the same as the local size used in creating the
2459: x vector for the matrix-vector product y = Ax.
2460: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
2461: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
2462: . d_nz - number of nonzero blocks per block row in diagonal portion of local
2463: submatrix (same for all local rows)
2464: . d_nnz - array containing the number of nonzero blocks in the various block rows
2465: in the diagonal portion of the local submatrix (possibly different for each block
2466: row) or PETSC_NULL. You must leave room for the diagonal entry even if it is zero.
2467: . o_nz - number of nonzero blocks per block row in the off-diagonal portion of local
2468: submatrix (same for all local rows).
2469: - o_nnz - array containing the number of nonzero blocks in the various block rows of the
2470: off-diagonal portion of the local submatrix (possibly different for
2471: each block row) or PETSC_NULL.
2473: Output Parameter:
2474: . A - the matrix
2476: Options Database Keys:
2477: . -mat_no_unroll - uses code that does not unroll the loops in the
2478: block calculations (much slower)
2479: . -mat_block_size - size of the blocks to use
2481: Notes:
2482: If the *_nnz parameter is given then the *_nz parameter is ignored
2484: A nonzero block is any block that has 1 or more nonzeros in it
2486: The user MUST specify either the local or global matrix dimensions
2487: (possibly both).
2489: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2490: then it must be used on all processors that share the object for that argument.
2492: Storage Information:
2493: For a square global matrix we define each processor's diagonal portion
2494: to be its local rows and the corresponding columns (a square submatrix);
2495: each processor's off-diagonal portion encompasses the remainder of the
2496: local matrix (a rectangular submatrix).
2498: The user can specify preallocated storage for the diagonal part of
2499: the local submatrix with either d_nz or d_nnz (not both). Set
2500: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
2501: memory allocation. Likewise, specify preallocated storage for the
2502: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
2504: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
2505: the figure below we depict these three local rows and all columns (0-11).
2507: .vb
2508: 0 1 2 3 4 5 6 7 8 9 10 11
2509: -------------------
2510: row 3 | o o o d d d o o o o o o
2511: row 4 | o o o d d d o o o o o o
2512: row 5 | o o o d d d o o o o o o
2513: -------------------
2514: .ve
2515:
2516: Thus, any entries in the d locations are stored in the d (diagonal)
2517: submatrix, and any entries in the o locations are stored in the
2518: o (off-diagonal) submatrix. Note that the d and the o submatrices are
2519: stored simply in the MATSEQBAIJ format for compressed row storage.
2521: Now d_nz should indicate the number of block nonzeros per row in the d matrix,
2522: and o_nz should indicate the number of block nonzeros per row in the o matrix.
2523: In general, for PDE problems in which most nonzeros are near the diagonal,
2524: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
2525: or you will get TERRIBLE performance; see the users' manual chapter on
2526: matrices.
2528: Level: intermediate
2530: .keywords: matrix, block, aij, compressed row, sparse, parallel
2532: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateMPIBAIJ(), MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
2533: @*/
2534: PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
2535: {
2537: PetscMPIInt size;
2540: MatCreate(comm,A);
2541: MatSetSizes(*A,m,n,M,N);
2542: MPI_Comm_size(comm,&size);
2543: if (size > 1) {
2544: MatSetType(*A,MATMPIBAIJ);
2545: MatMPIBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
2546: } else {
2547: MatSetType(*A,MATSEQBAIJ);
2548: MatSeqBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
2549: }
2550: return(0);
2551: }
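/*
   Usage sketch (sizes are illustrative, not from the original source): the
   convenience routine above combines creation, type selection and
   preallocation; with PETSC_DECIDE the local sizes are derived from M and N.

.vb
      MatCreateMPIBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,M,N,
                       5,PETSC_NULL,2,PETSC_NULL,&A);
.ve
*/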
2555: static PetscErrorCode MatDuplicate_MPIBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2556: {
2557: Mat mat;
2558: Mat_MPIBAIJ *a,*oldmat = (Mat_MPIBAIJ*)matin->data;
2560: PetscInt len=0;
2563: *newmat = 0;
2564: MatCreate(matin->comm,&mat);
2565: MatSetSizes(mat,matin->m,matin->n,matin->M,matin->N);
2566: MatSetType(mat,matin->type_name);
2567: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2569: mat->factor = matin->factor;
2570: mat->preallocated = PETSC_TRUE;
2571: mat->assembled = PETSC_TRUE;
2572: mat->insertmode = NOT_SET_VALUES;
2574: a = (Mat_MPIBAIJ*)mat->data;
2575: mat->bs = matin->bs;
2576: a->bs2 = oldmat->bs2;
2577: a->mbs = oldmat->mbs;
2578: a->nbs = oldmat->nbs;
2579: a->Mbs = oldmat->Mbs;
2580: a->Nbs = oldmat->Nbs;
2581:
2582: a->rstart = oldmat->rstart;
2583: a->rend = oldmat->rend;
2584: a->cstart = oldmat->cstart;
2585: a->cend = oldmat->cend;
2586: a->size = oldmat->size;
2587: a->rank = oldmat->rank;
2588: a->donotstash = oldmat->donotstash;
2589: a->roworiented = oldmat->roworiented;
2590: a->rowindices = 0;
2591: a->rowvalues = 0;
2592: a->getrowactive = PETSC_FALSE;
2593: a->barray = 0;
2594: a->rstart_bs = oldmat->rstart_bs;
2595: a->rend_bs = oldmat->rend_bs;
2596: a->cstart_bs = oldmat->cstart_bs;
2597: a->cend_bs = oldmat->cend_bs;
2599: /* hash table stuff */
2600: a->ht = 0;
2601: a->hd = 0;
2602: a->ht_size = 0;
2603: a->ht_flag = oldmat->ht_flag;
2604: a->ht_fact = oldmat->ht_fact;
2605: a->ht_total_ct = 0;
2606: a->ht_insert_ct = 0;
2608: PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(PetscInt));
2609: MatStashCreate_Private(matin->comm,1,&mat->stash);
2610: MatStashCreate_Private(matin->comm,matin->bs,&mat->bstash);
2611: if (oldmat->colmap) {
2612: #if defined (PETSC_USE_CTABLE)
2613: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2614: #else
2615: PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
2616: PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
2617: PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
2618: #endif
2619: } else a->colmap = 0;
2621: if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2622: PetscMalloc(len*sizeof(PetscInt),&a->garray);
2623: PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2624: PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
2625: } else a->garray = 0;
2626:
2627: VecDuplicate(oldmat->lvec,&a->lvec);
2628: PetscLogObjectParent(mat,a->lvec);
2629: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2630: PetscLogObjectParent(mat,a->Mvctx);
2632: MatDuplicate(oldmat->A,cpvalues,&a->A);
2633: PetscLogObjectParent(mat,a->A);
2634: MatDuplicate(oldmat->B,cpvalues,&a->B);
2635: PetscLogObjectParent(mat,a->B);
2636: PetscFListDuplicate(matin->qlist,&mat->qlist);
2637: *newmat = mat;
2639: return(0);
2640: }
2642: #include petscsys.h
2646: PetscErrorCode MatLoad_MPIBAIJ(PetscViewer viewer, MatType type,Mat *newmat)
2647: {
2648: Mat A;
2650: int fd;
2651: PetscInt i,nz,j,rstart,rend;
2652: PetscScalar *vals,*buf;
2653: MPI_Comm comm = ((PetscObject)viewer)->comm;
2654: MPI_Status status;
2655: PetscMPIInt rank,size,maxnz;
2656: PetscInt header[4],*rowlengths = 0,M,N,m,*rowners,*cols;
2657: PetscInt *locrowlens,*procsnz = 0,*browners;
2658: PetscInt jj,*mycols,*ibuf,bs=1,Mbs,mbs,extra_rows,mmax;
2659: PetscMPIInt tag = ((PetscObject)viewer)->tag;
2660: PetscInt *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2661: PetscInt dcount,kmax,k,nzcount,tmp,mend;
2662:
2664: PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);
2666: MPI_Comm_size(comm,&size);
2667: MPI_Comm_rank(comm,&rank);
2668: if (!rank) {
2669: PetscViewerBinaryGetDescriptor(viewer,&fd);
2670: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2671: if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not a matrix object");
2672: }
2674: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2675: M = header[1]; N = header[2];
2677: if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2679: /*
2680: This code adds extra rows to make sure the number of rows is
2681: divisible by the blocksize
2682: */
2683: Mbs = M/bs;
2684: extra_rows = bs - M + bs*Mbs;
2685: if (extra_rows == bs) extra_rows = 0;
2686: else Mbs++;
2687: if (extra_rows && !rank) {
2688: PetscLogInfo((0,"MatLoad_MPIBAIJ:Padding loaded matrix to match blocksize\n"));
2689: }
2691: /* determine ownership of all rows */
2692: mbs = Mbs/size + ((Mbs % size) > rank);
2693: m = mbs*bs;
2694: PetscMalloc2(size+1,PetscInt,&rowners,size+1,PetscInt,&browners);
2695: MPI_Allgather(&mbs,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
2697: /* process 0 needs enough room for process with most rows */
2698: if (!rank) {
2699: mmax = rowners[1];
2700: for (i=2; i<size; i++) {
2701: mmax = PetscMax(mmax,rowners[i]);
2702: }
2703: mmax*=bs;
2704: } else mmax = m;
2706: rowners[0] = 0;
2707: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2708: for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
2709: rstart = rowners[rank];
2710: rend = rowners[rank+1];
2712: /* distribute row lengths to all processors */
2713: PetscMalloc((mmax+1)*sizeof(PetscInt),&locrowlens);
2714: if (!rank) {
2715: mend = m;
2716: if (size == 1) mend = mend - extra_rows;
2717: PetscBinaryRead(fd,locrowlens,mend,PETSC_INT);
2718: for (j=mend; j<m; j++) locrowlens[j] = 1;
2719: PetscMalloc(m*sizeof(PetscInt),&rowlengths);
2720: PetscMalloc(size*sizeof(PetscInt),&procsnz);
2721: PetscMemzero(procsnz,size*sizeof(PetscInt));
2722: for (j=0; j<m; j++) {
2723: procsnz[0] += locrowlens[j];
2724: }
2725: for (i=1; i<size; i++) {
2726: mend = browners[i+1] - browners[i];
2727: if (i == size-1) mend = mend - extra_rows;
2728: PetscBinaryRead(fd,rowlengths,mend,PETSC_INT);
2729: for (j=mend; j<browners[i+1] - browners[i]; j++) rowlengths[j] = 1;
2730: /* calculate the number of nonzeros on each processor */
2731: for (j=0; j<browners[i+1]-browners[i]; j++) {
2732: procsnz[i] += rowlengths[j];
2733: }
2734: MPI_Send(rowlengths,browners[i+1]-browners[i],MPIU_INT,i,tag,comm);
2735: }
2736: PetscFree(rowlengths);
2737: } else {
2738: MPI_Recv(locrowlens,m,MPIU_INT,0,tag,comm,&status);
2739: }
2741: if (!rank) {
2742: /* determine max buffer needed and allocate it */
2743: maxnz = procsnz[0];
2744: for (i=1; i<size; i++) {
2745: maxnz = PetscMax(maxnz,procsnz[i]);
2746: }
2747: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
2749: /* read in my part of the matrix column indices */
2750: nz = procsnz[0];
2751: PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);
2752: mycols = ibuf;
2753: if (size == 1) nz -= extra_rows;
2754: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2755: if (size == 1) for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2757: /* read in the other processors' parts (except the last) and ship them off */
2758: for (i=1; i<size-1; i++) {
2759: nz = procsnz[i];
2760: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2761: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2762: }
2763: /* read in the stuff for the last proc */
2764: if (size != 1) {
2765: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
2766: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2767: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2768: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2769: }
2770: PetscFree(cols);
2771: } else {
2772: /* determine buffer space needed for message */
2773: nz = 0;
2774: for (i=0; i<m; i++) {
2775: nz += locrowlens[i];
2776: }
2777: PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);
2778: mycols = ibuf;
2779: /* receive message of column indices*/
2780: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2781: MPI_Get_count(&status,MPIU_INT,&maxnz);
2782: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2783: }
2784:
2785: /* loop over local rows, determining number of off diagonal entries */
2786: PetscMalloc2(rend-rstart,PetscInt,&dlens,rend-rstart,PetscInt,&odlens);
2787: PetscMalloc3(Mbs,PetscInt,&mask,Mbs,PetscInt,&masked1,Mbs,PetscInt,&masked2);
2788: PetscMemzero(mask,Mbs*sizeof(PetscInt));
2789: PetscMemzero(masked1,Mbs*sizeof(PetscInt));
2790: PetscMemzero(masked2,Mbs*sizeof(PetscInt));
2791: rowcount = 0; nzcount = 0;
2792: for (i=0; i<mbs; i++) {
2793: dcount = 0;
2794: odcount = 0;
2795: for (j=0; j<bs; j++) {
2796: kmax = locrowlens[rowcount];
2797: for (k=0; k<kmax; k++) {
2798: tmp = mycols[nzcount++]/bs;
2799: if (!mask[tmp]) {
2800: mask[tmp] = 1;
2801: if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp;
2802: else masked1[dcount++] = tmp;
2803: }
2804: }
2805: rowcount++;
2806: }
2807:
2808: dlens[i] = dcount;
2809: odlens[i] = odcount;
2811: /* zero out the mask elements we set */
2812: for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2813: for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2814: }
2816: /* create our matrix */
2817: MatCreate(comm,&A);
2818: MatSetSizes(A,m,m,M+extra_rows,N+extra_rows);
2819: MatSetType(A,type);
2820: MatMPIBAIJSetPreallocation(A,bs,0,dlens,0,odlens);
2822: /* Why doesn't this get called using MatSetOption(A,MAT_COLUMNS_SORTED)? */
2823: MatSetOption(A,MAT_COLUMNS_SORTED);
2824:
2825: if (!rank) {
2826: PetscMalloc((maxnz+1)*sizeof(PetscScalar),&buf);
2827: /* read in my part of the matrix numerical values */
2828: nz = procsnz[0];
2829: vals = buf;
2830: mycols = ibuf;
2831: if (size == 1) nz -= extra_rows;
2832: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2833: if (size == 1) for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2835: /* insert into matrix */
2836: jj = rstart*bs;
2837: for (i=0; i<m; i++) {
2838: MatSetValues_MPIBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2839: mycols += locrowlens[i];
2840: vals += locrowlens[i];
2841: jj++;
2842: }
2843: /* read in other processors (except the last one) and ship out */
2844: for (i=1; i<size-1; i++) {
2845: nz = procsnz[i];
2846: vals = buf;
2847: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2848: MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
2849: }
2850: /* the last proc */
2851: if (size != 1){
2852: nz = procsnz[i] - extra_rows; /* i == size-1 here; the extra (padding) rows are not in the file */
2853: vals = buf;
2854: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2855: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2856: MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);
2857: }
2858: PetscFree(procsnz);
2859: } else {
2860: /* receive numeric values */
2861: PetscMalloc((nz+1)*sizeof(PetscScalar),&buf);
2863: /* receive message of values*/
2864: vals = buf;
2865: mycols = ibuf;
2866: MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
2867: MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2868: if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2870: /* insert into matrix */
2871: jj = rstart*bs;
2872: for (i=0; i<m; i++) {
2873: MatSetValues_MPIBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2874: mycols += locrowlens[i];
2875: vals += locrowlens[i];
2876: jj++;
2877: }
2878: }
2879: PetscFree(locrowlens);
2880: PetscFree(buf);
2881: PetscFree(ibuf);
2882: PetscFree2(rowners,browners);
2883: PetscFree2(dlens,odlens);
2884: PetscFree3(mask,masked1,masked2);
2885: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2886: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2888: *newmat = A;
2889: return(0);
2890: }
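/*
   A minimal sketch of driving the loader above (the file name is
   illustrative, not from the original source): the block size is taken from
   -matload_block_size, defaulting to 1.

.vb
      PetscViewer viewer;
      Mat         A;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
      MatLoad(viewer,MATMPIBAIJ,&A);
      PetscViewerDestroy(viewer);
.ve
*/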
2894: /*@
2895: MatMPIBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2897: Input Parameters:
2898: + mat - the matrix
2899: - fact - the factor
2901: Collective on Mat
2903: Level: advanced
2905: Notes:
2906: This can also be set by the command line option: -mat_use_hash_table fact
2908: .keywords: matrix, hashtable, factor, HT
2910: .seealso: MatSetOption()
2911: @*/
2912: PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2913: {
2914: PetscErrorCode ierr,(*f)(Mat,PetscReal);
2917: PetscObjectQueryFunction((PetscObject)mat,"MatSetHashTableFactor_C",(void (**)(void))&f);
2918: if (f) {
2919: (*f)(mat,fact);
2920: }
2921: return(0);
2922: }
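/*
   Usage sketch (the factor is illustrative, not from the original source):
   enabling hash-table based assembly and then setting the table growth
   factor, mirroring what -mat_use_hash_table does at creation time.

.vb
      MatSetOption(A,MAT_USE_HASH_TABLE);
      MatMPIBAIJSetHashTableFactor(A,1.39);
.ve
*/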
2927: PetscErrorCode PETSCMAT_DLLEXPORT MatSetHashTableFactor_MPIBAIJ(Mat mat,PetscReal fact)
2928: {
2929: Mat_MPIBAIJ *baij;
2932: baij = (Mat_MPIBAIJ*)mat->data;
2933: baij->ht_fact = fact;
2934: return(0);
2935: }
2940: PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJGetSeqBAIJ(Mat A,Mat *Ad,Mat *Ao,PetscInt *colmap[])
2941: {
2942: Mat_MPIBAIJ *a = (Mat_MPIBAIJ *)A->data;
2944: *Ad = a->A;
2945: *Ao = a->B;
2946: *colmap = a->garray;
2947: return(0);
2948: }
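/*
   Usage sketch (not from the original source): direct access to the two
   local SeqBAIJ pieces and to the global block-column indices of the
   off-diagonal part; the returned objects are references owned by A and
   must not be destroyed by the caller.

.vb
      Mat      Ad,Ao;
      PetscInt *colmap;
      MatMPIBAIJGetSeqBAIJ(A,&Ad,&Ao,&colmap);
.ve
*/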