Actual source code: ex42.c

/*$Id: ex42.c,v 1.25 2001/08/07 03:03:07 balay Exp $*/

static char help[] = "Tests MatIncreaseOverlap() and MatGetSubMatrices() for the parallel case.\n\
This example is similar to ex40.c; here the index sets used are random.\n\
Input arguments are:\n\
  -f <input_file> : file to load.  For a 5x5 example of the 5-pt. stencil,\n\
                       use the file petsc/src/mat/examples/matbinary.ex\n\
  -nd <size>      : > 0  number of domains per processor\n\
  -ov <overlap>   : >=0  amount of overlap between domains\n\n";
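/* Example invocation (illustrative only; pass any PETSc binary matrix file to -f):
     mpirun -np 2 ex42 -f matbinary.ex -nd 4 -ov 2 */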

#include "petscsles.h"

int main(int argc,char **args)
{
  int         ierr,nd = 2,ov=1,i,j,size,m,n,rank,*idx;
  PetscTruth  flg;
  Mat         A,B,*submatA,*submatB;
  char        file[128];
  PetscViewer fd;
  IS          *is1,*is2;
  PetscRandom r;
  PetscScalar rand;

  PetscInitialize(&argc,&args,(char *)0,help);
#if defined(PETSC_USE_COMPLEX)
  SETERRQ(1,"This example does not work with complex numbers");
#else

  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  PetscOptionsGetString(PETSC_NULL,"-f",file,127,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-nd",&nd,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-ov",&ov,PETSC_NULL);

  /* Read matrix and RHS */
  PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,PETSC_BINARY_RDONLY,&fd);
  MatLoad(fd,MATMPIAIJ,&A);
  PetscViewerDestroy(fd);

  /* Read the matrix again as a seq matrix */
  PetscViewerBinaryOpen(PETSC_COMM_SELF,file,PETSC_BINARY_RDONLY,&fd);
  MatLoad(fd,MATSEQAIJ,&B);
  PetscViewerDestroy(fd);

  /* Create the random number generator */
  MatGetSize(A,&m,&n);
  PetscRandomCreate(PETSC_COMM_SELF,RANDOM_DEFAULT,&r);
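  /* Note: PetscRandomGetValue() with RANDOM_DEFAULT returns uniform values in [0,1),
     so (int)(rand*m) below yields an index in the range [0,m) */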

  /* Create the IS corresponding to subdomains */
  PetscMalloc(nd*sizeof(IS **),&is1);
  PetscMalloc(nd*sizeof(IS **),&is2);
  PetscMalloc(m*sizeof(int),&idx);

  /* Create the random Index Sets */
  for (i=0; i<nd; i++) {
    /* Skip a few, so that the IS on different procs are different */
    for (j=0; j<rank; j++) {
      ierr   = PetscRandomGetValue(r,&rand);
    }
    ierr   = PetscRandomGetValue(r,&rand);
    size   = (int)(rand*m);
    for (j=0; j<size; j++) {
      ierr   = PetscRandomGetValue(r,&rand);
      idx[j] = (int)(rand*m);
    }
    PetscSortInt(size,idx);
    ISCreateGeneral(PETSC_COMM_SELF,size,idx,is1+i);
    ISCreateGeneral(PETSC_COMM_SELF,size,idx,is2+i);
  }
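  /* is1 and is2 start out identical, so the parallel (A) and sequential (B) code paths
     below should produce the same overlapped index sets and the same submatrices */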

  MatIncreaseOverlap(A,nd,is1,ov);
  MatIncreaseOverlap(B,nd,is2,ov);
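  /* MatIncreaseOverlap() replaces each index set with a larger one containing
     ov additional levels of overlap, determined from the matrix nonzero structure */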

  for (i=0; i<nd; ++i) {
    ISSort(is1[i]);
    ISSort(is2[i]);
  }

  MatGetSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);
  MatGetSubMatrices(B,nd,is2,is2,MAT_INITIAL_MATRIX,&submatB);
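
  /* MatGetSubMatrices() returns an array of nd sequential matrices, one per index set;
     sorting the index sets first keeps the row/column ordering of the two extractions comparable */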
  /* Now see if the serial and parallel case have the same answers */
  for (i=0; i<nd; ++i) {
    MatEqual(submatA[i],submatB[i],&flg);
    PetscPrintf(PETSC_COMM_SELF,"proc:[%d], i=%d, flg =%d\n",rank,i,flg);
  }
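  /* If the parallel and sequential paths agree, flg should print as 1 (PETSC_TRUE)
     for every domain on every process */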

  /* Free Allocated Memory */
  for (i=0; i<nd; ++i) {
    ISDestroy(is1[i]);
    ISDestroy(is2[i]);
    MatDestroy(submatA[i]);
    MatDestroy(submatB[i]);
  }
  PetscFree(submatA);
  PetscFree(submatB);
  PetscRandomDestroy(r);
  PetscFree(is1);
  PetscFree(is2);
  MatDestroy(A);
  MatDestroy(B);
  PetscFree(idx);

  PetscFinalize();
#endif
  return 0;
}