Actual source code: pmetis.c
1: /*$Id: pmetis.c,v 1.46 2001/09/07 20:10:38 bsmith Exp $*/
2:
3: #include "src/mat/impls/adj/mpi/mpiadj.h" /*I "petscmat.h" I*/
5: /*
6: Currently using ParMetis-2.0. The following include file has
7: to be changed to par_kmetis.h for ParMetis-1.0
8: */
9: EXTERN_C_BEGIN
10: #include "parmetis.h"
11: EXTERN_C_END
13: /*
14: The first 5 elements of this structure are the input control array to Metis
15: */
16: typedef struct {
17: int cuts; /* number of cuts made (output) */
18: int foldfactor; /* folding factor (default 150, set in MatPartitioningCreate_Parmetis) */
19: int parallel; /* use parallel partitioner for coarse problem */
20: int indexing; /* 0 indicates C indexing, 1 Fortran */
21: int printout; /* indicates if one wishes Metis to print info */
22: } MatPartitioning_Parmetis;
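/*
   Illustrative sketch (not part of the original source): because the five
   members above are plain ints, the structure itself is what gets handed to
   ParMETIS as its integer control array; the (int*)parmetis cast in
   MatPartitioningApply_Parmetis() below relies on exactly this layout:

       MatPartitioning_Parmetis ctl;
       int *options = (int*)&ctl;
       options[0] .. options[4]  correspond to
       ctl.cuts, ctl.foldfactor, ctl.parallel, ctl.indexing, ctl.printout

   The aliasing is only valid while these five fields remain first and in this
   order.
*/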
24: /*
25: Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
26: */
27: static int MatPartitioningApply_Parmetis(MatPartitioning part,IS *partitioning)
28: {
29: int ierr,*locals,size,rank;
30: int *vtxdist,*xadj,*adjncy,itmp = 0;
31: Mat mat = part->adj,newmat;
32: Mat_MPIAdj *adj = (Mat_MPIAdj *)mat->data;
33: MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis*)part->data;
34: PetscTruth flg;
37: ierr = MPI_Comm_size(mat->comm,&size);CHKERRQ(ierr);
38: if (part->n != size) {
39: SETERRQ(PETSC_ERR_SUP,"Supports exactly one domain per processor");
40: }
42: ierr = PetscTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
43: if (!flg) {
44: ierr = MatConvert(mat,MATMPIADJ,&newmat);CHKERRQ(ierr);
45: adj = (Mat_MPIAdj *)newmat->data;
46: }
48: vtxdist = adj->rowners;
49: xadj = adj->i;
50: adjncy = adj->j;
51: ierr = MPI_Comm_rank(part->comm,&rank);CHKERRQ(ierr);
52: if (!(vtxdist[rank+1] - vtxdist[rank])) {
53: SETERRQ(1,"Does not support any processor with no entries");
54: }
55: #if defined(PETSC_USE_BOPT_g)
56: /* check that matrix has no diagonal entries */
57: {
58: int i,j,rstart;
59: ierr = MatGetOwnershipRange(mat,&rstart,PETSC_NULL);CHKERRQ(ierr);
60: for (i=0; i<mat->m; i++) {
61: for (j=xadj[i]; j<xadj[i+1]; j++) {
62: if (adjncy[j] == i+rstart) SETERRQ1(1,"Row %d has illegal diagonal entry",i+rstart);
63: }
64: }
65: }
66: #endif
68: ierr = PetscMalloc((mat->m+1)*sizeof(int),&locals);CHKERRQ(ierr);
70: if (PetscLogPrintInfo) {itmp = parmetis->printout; parmetis->printout = 127;}
71: PARKMETIS(vtxdist,xadj,part->vertex_weights,adjncy,adj->values,locals,(int*)parmetis,part->comm);
72: if (PetscLogPrintInfo) {parmetis->printout = itmp;}
74: ierr = ISCreateGeneral(part->comm,mat->m,locals,partitioning);CHKERRQ(ierr);
75: ierr = PetscFree(locals);CHKERRQ(ierr);
77: if (!flg) {
78: ierr = MatDestroy(newmat);CHKERRQ(ierr);
79: }
80: return(0);
81: }
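/*
   Minimal usage sketch (illustrative only, not part of the original file).
   It assumes this partitioner is registered under the type name "parmetis"
   and drives the Apply routine above through the public MatPartitioning
   interface:

       Mat             adj;      adjacency matrix, e.g. from MatCreateMPIAdj()
       MatPartitioning part;
       IS              is;

       MatPartitioningCreate(PETSC_COMM_WORLD,&part);
       MatPartitioningSetAdjacency(part,adj);
       MatPartitioningSetType(part,"parmetis");
       MatPartitioningSetFromOptions(part);
       MatPartitioningApply(part,&is);     entry k of is = destination processor of local row k
       ISDestroy(is);
       MatPartitioningDestroy(part);
*/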
84: int MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
85: {
86: MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
87: int ierr,rank;
88: PetscTruth isascii;
91: ierr = MPI_Comm_rank(part->comm,&rank);CHKERRQ(ierr);
92: ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
93: if (isascii) {
94: if (parmetis->parallel == 2) {
95: ierr = PetscViewerASCIIPrintf(viewer," Using parallel coarse grid partitioner\n");CHKERRQ(ierr);
96: } else {
97: ierr = PetscViewerASCIIPrintf(viewer," Using sequential coarse grid partitioner\n");CHKERRQ(ierr);
98: }
99: ierr = PetscViewerASCIIPrintf(viewer," Using %d fold factor\n",parmetis->foldfactor);CHKERRQ(ierr);
100: ierr = PetscViewerASCIISynchronizedPrintf(viewer," [%d]Number of cuts found %d\n",rank,parmetis->cuts);CHKERRQ(ierr);
101: ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
102: } else {
103: SETERRQ1(1,"Viewer type %s not supported for this Parmetis partitioner",((PetscObject)viewer)->type_name);
104: }
106: return(0);
107: }
109: /*@
110: MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
111: do the partitioning of the coarse grid.
113: Collective on MatPartitioning
115: Input Parameter:
116: . part - the partitioning context
118: Level: advanced
120: @*/
121: int MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
122: {
123: MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
126: parmetis->parallel = 1;
127: return(0);
128: }
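/*
   Usage sketch (illustrative): the routine above is reached either by a direct
   call once the ParMETIS partitioning context exists,

       MatPartitioningParmetisSetCoarseSequential(part);

   or from the options database via
       -mat_partitioning_parmetis_coarse_sequential
   which MatPartitioningSetFromOptions_Parmetis() below translates into the
   same call.
*/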
129: int MatPartitioningSetFromOptions_Parmetis(MatPartitioning part)
130: {
131: int ierr;
132: PetscTruth flag;
135: ierr = PetscOptionsHead("Set ParMETIS partitioning options");CHKERRQ(ierr);
136: ierr = PetscOptionsName("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",&flag);CHKERRQ(ierr);
137: if (flag) {
138: ierr = MatPartitioningParmetisSetCoarseSequential(part);CHKERRQ(ierr);
139: }
140: ierr = PetscOptionsTail();CHKERRQ(ierr);
141: return(0);
142: }
145: int MatPartitioningDestroy_Parmetis(MatPartitioning part)
146: {
147: MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
151: PetscFree(parmetis);
152: return(0);
153: }
155: EXTERN_C_BEGIN
156: int MatPartitioningCreate_Parmetis(MatPartitioning part)
157: {
158: int ierr;
159: MatPartitioning_Parmetis *parmetis;
162: ierr = PetscNew(MatPartitioning_Parmetis,&parmetis);CHKERRQ(ierr);
164: parmetis->cuts = 0; /* output variable */
165: parmetis->foldfactor = 150; /*folding factor */
166: parmetis->parallel = 2; /* use parallel partitioner for coarse grid */
167: parmetis->indexing = 0; /* index numbering starts from 0 */
168: parmetis->printout = 0; /* print no output while running */
170: part->ops->apply = MatPartitioningApply_Parmetis;
171: part->ops->view = MatPartitioningView_Parmetis;
172: part->ops->destroy = MatPartitioningDestroy_Parmetis;
173: part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
174: part->data = (void*)parmetis;
175: return(0);
176: }
177: EXTERN_C_END