Actual source code: eige.c
/*$Id: eige.c,v 1.35 2001/08/07 03:03:45 balay Exp $*/

#include "src/sles/ksp/kspimpl.h"

/*@
   KSPComputeExplicitOperator - Computes the explicit preconditioned operator.

   Collective on KSP

   Input Parameter:
.  ksp - the Krylov subspace context

   Output Parameter:
.  mat - the explicit preconditioned operator

   Notes:
   This computation is done by applying the operators to columns of the
   identity matrix.

   Currently, this routine uses a dense matrix format when 1 processor
   is used and a sparse format otherwise. This routine is costly in general,
   and is recommended for use only with relatively small systems.

   Level: advanced

.keywords: KSP, compute, explicit, operator

.seealso: KSPComputeEigenvaluesExplicitly()
@*/
int KSPComputeExplicitOperator(KSP ksp,Mat *mat)
{
  Vec         in,out;
  int         ierr,i,M,m,size,*rows,start,end;
  Mat         A;
  MPI_Comm    comm;
  PetscScalar *array,zero = 0.0,one = 1.0;

  PetscFunctionBegin;
  comm = ksp->comm;

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  ierr = VecDuplicate(ksp->vec_sol,&in);CHKERRQ(ierr);
  ierr = VecDuplicate(ksp->vec_sol,&out);CHKERRQ(ierr);
  ierr = VecGetSize(in,&M);CHKERRQ(ierr);
  ierr = VecGetLocalSize(in,&m);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(in,&start,&end);CHKERRQ(ierr);
  ierr = PetscMalloc((m+1)*sizeof(int),&rows);CHKERRQ(ierr);
  for (i=0; i<m; i++) {rows[i] = start + i;}

  if (size == 1) {
    ierr = MatCreateSeqDense(comm,M,M,PETSC_NULL,mat);CHKERRQ(ierr);
  } else {
    /* ierr = MatCreateMPIDense(comm,m,M,M,M,PETSC_NULL,mat);CHKERRQ(ierr); */
    ierr = MatCreateMPIAIJ(comm,m,m,M,M,0,0,0,0,mat);CHKERRQ(ierr);
  }

  ierr = PCGetOperators(ksp->B,&A,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);

  /* column i of the result is the preconditioned operator applied to the
     i-th column of the identity matrix, in = e_i */
  for (i=0; i<M; i++) {

    ierr = VecSet(&zero,in);CHKERRQ(ierr);
    ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(in);CHKERRQ(ierr);

    ierr = KSP_MatMult(ksp,A,in,out);CHKERRQ(ierr);
    ierr = KSP_PCApply(ksp,ksp->B,out,in);CHKERRQ(ierr);

    ierr = VecGetArray(in,&array);CHKERRQ(ierr);
    ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
    ierr = VecRestoreArray(in,&array);CHKERRQ(ierr);
  }
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = VecDestroy(in);CHKERRQ(ierr);
  ierr = VecDestroy(out);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
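
/*
   Usage sketch (illustration only, not part of this file): once a KSP has been
   configured with KSPSetOperators() and set up, the explicit operator can be
   formed and inspected. "ksp" and "ierr" are assumed to exist in the caller.

      Mat expmat;
      ierr = KSPComputeExplicitOperator(ksp,&expmat);CHKERRQ(ierr);
      ierr = MatView(expmat,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
      ierr = MatDestroy(expmat);CHKERRQ(ierr);
*/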

#include "petscblaslapack.h"

/*@
   KSPComputeEigenvaluesExplicitly - Computes all of the eigenvalues of the
   preconditioned operator using LAPACK.

   Collective on KSP

   Input Parameters:
+  ksp - iterative context obtained from KSPCreate()
-  nmax - size of arrays r and c; must be at least the global size of the matrix

   Output Parameters:
+  r - real part of computed eigenvalues
-  c - imaginary part of computed eigenvalues

   Notes:
   This approach is very slow but will generally provide accurate eigenvalue
   estimates. This routine explicitly forms a dense matrix representing
   the preconditioned operator, and thus will run only for relatively small
   problems, say n < 500.

   Many users may just want to use the monitoring routine
   KSPSingularValueMonitor() (which can be set with the option -ksp_singmonitor)
   to print the singular values at each iteration of the linear solve.

   Level: advanced

.keywords: KSP, compute, eigenvalues, explicitly

.seealso: KSPComputeEigenvalues(), KSPSingularValueMonitor(), KSPComputeExtremeSingularValues()
@*/
int KSPComputeEigenvaluesExplicitly(KSP ksp,int nmax,PetscReal *r,PetscReal *c)
{
  Mat         BA;
  int         i,n,ierr,size,rank,dummy;
  MPI_Comm    comm = ksp->comm;
  PetscScalar *array;
  Mat         A;
  int         m,row,nz,*cols;
  PetscScalar *vals;

  PetscFunctionBegin;
  ierr = KSPComputeExplicitOperator(ksp,&BA);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = MatGetSize(BA,&n,&n);CHKERRQ(ierr);
  if (size > 1) { /* assemble matrix on first processor */
    if (!rank) {
      ierr = MatCreateMPIDense(ksp->comm,n,n,n,n,PETSC_NULL,&A);CHKERRQ(ierr);
    } else {
      ierr = MatCreateMPIDense(ksp->comm,0,n,n,n,PETSC_NULL,&A);CHKERRQ(ierr);
    }
    PetscLogObjectParent(BA,A);

    /* copy the locally owned rows of BA into the dense matrix A */
    ierr = MatGetOwnershipRange(BA,&row,&dummy);CHKERRQ(ierr);
    ierr = MatGetLocalSize(BA,&m,&dummy);CHKERRQ(ierr);
    for (i=0; i<m; i++) {
      ierr = MatGetRow(BA,row,&nz,&cols,&vals);CHKERRQ(ierr);
      ierr = MatSetValues(A,1,&row,nz,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
      ierr = MatRestoreRow(BA,row,&nz,&cols,&vals);CHKERRQ(ierr);
      row++;
    }

    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatGetArray(A,&array);CHKERRQ(ierr);
  } else {
    ierr = MatGetArray(BA,&array);CHKERRQ(ierr);
  }

#if defined(PETSC_HAVE_ESSL)
  /* ESSL has a different calling sequence for dgeev() and zgeev() than standard LAPACK */
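  /* Note: ESSL's geev returns the eigenvalues as complex values in cwork, so a
     real-scalar build needs 2*n real entries (interleaved real/imaginary parts,
     read back below as cwork[2*i] and cwork[2*i+1]), while a complex build needs
     only n entries. */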
  if (!rank) {
    PetscScalar sdummy,*cwork;
    PetscReal   *work,*realpart;
    int         clen,idummy,lwork,*perm,zero;

#if !defined(PETSC_USE_COMPLEX)
    clen = 2*n;              /* n complex eigenvalues stored as 2*n reals */
#else
    clen = n;
#endif
    ierr   = PetscMalloc(clen*sizeof(PetscScalar),&cwork);CHKERRQ(ierr);
    idummy = n;
    lwork  = 5*n;
    ierr   = PetscMalloc(lwork*sizeof(PetscReal),&work);CHKERRQ(ierr);
    ierr   = PetscMalloc(n*sizeof(PetscReal),&realpart);CHKERRQ(ierr);
    zero   = 0;
    LAgeev_(&zero,array,&n,cwork,&sdummy,&idummy,&idummy,&n,work,&lwork);
    ierr = PetscFree(work);CHKERRQ(ierr);

    /* For now we stick with the convention of storing the real and imaginary
       components of evalues separately. But is this what we really want? */
    ierr = PetscMalloc(n*sizeof(int),&perm);CHKERRQ(ierr);

#if !defined(PETSC_USE_COMPLEX)
    for (i=0; i<n; i++) {
      realpart[i] = cwork[2*i];
      perm[i]     = i;
    }
    ierr = PetscSortRealWithPermutation(n,realpart,perm);CHKERRQ(ierr);
    for (i=0; i<n; i++) {
      r[i] = cwork[2*perm[i]];
      c[i] = cwork[2*perm[i]+1];
    }
#else
    for (i=0; i<n; i++) {
      realpart[i] = PetscRealPart(cwork[i]);
      perm[i]     = i;
    }
    ierr = PetscSortRealWithPermutation(n,realpart,perm);CHKERRQ(ierr);
    for (i=0; i<n; i++) {
      r[i] = PetscRealPart(cwork[perm[i]]);
      c[i] = PetscImaginaryPart(cwork[perm[i]]);
    }
#endif
    ierr = PetscFree(perm);CHKERRQ(ierr);
    ierr = PetscFree(realpart);CHKERRQ(ierr);
    ierr = PetscFree(cwork);CHKERRQ(ierr);
  }
#elif !defined(PETSC_USE_COMPLEX)
  if (!rank) {
    PetscScalar *work,sdummy;
    PetscReal   *realpart,*imagpart;
    int         idummy,lwork,*perm;

    idummy   = n;
    lwork    = 5*n;
    ierr     = PetscMalloc(2*n*sizeof(PetscReal),&realpart);CHKERRQ(ierr);
    imagpart = realpart + n;
    ierr     = PetscMalloc(5*n*sizeof(PetscReal),&work);CHKERRQ(ierr);
#if defined(PETSC_MISSING_LAPACK_GEEV)
    SETERRQ(PETSC_ERR_SUP,"GEEV - LAPACK routine is unavailable\nNot able to provide eigenvalues.");
#else
    LAgeev_("N","N",&n,array,&n,realpart,imagpart,&sdummy,&idummy,&sdummy,&idummy,work,&lwork,&ierr);
#endif
    if (ierr) SETERRQ1(PETSC_ERR_LIB,"Error in LAPACK routine %d",ierr);
    ierr = PetscFree(work);CHKERRQ(ierr);
    ierr = PetscMalloc(n*sizeof(int),&perm);CHKERRQ(ierr);
    for (i=0; i<n; i++) {perm[i] = i;}
    ierr = PetscSortRealWithPermutation(n,realpart,perm);CHKERRQ(ierr);
    for (i=0; i<n; i++) {
      r[i] = realpart[perm[i]];
      c[i] = imagpart[perm[i]];
    }
    ierr = PetscFree(perm);CHKERRQ(ierr);
    ierr = PetscFree(realpart);CHKERRQ(ierr);
  }
#else
  if (!rank) {
    PetscScalar *work,sdummy,*eigs;
    PetscReal   *rwork;
    int         idummy,lwork,*perm;

    idummy = n;
    lwork  = 5*n;
    ierr   = PetscMalloc(5*n*sizeof(PetscScalar),&work);CHKERRQ(ierr);
    ierr   = PetscMalloc(2*n*sizeof(PetscReal),&rwork);CHKERRQ(ierr);
    ierr   = PetscMalloc(n*sizeof(PetscScalar),&eigs);CHKERRQ(ierr);
#if defined(PETSC_MISSING_LAPACK_GEEV)
    SETERRQ(PETSC_ERR_SUP,"GEEV - LAPACK routine is unavailable\nNot able to provide eigenvalues.");
#else
    LAgeev_("N","N",&n,array,&n,eigs,&sdummy,&idummy,&sdummy,&idummy,work,&lwork,rwork,&ierr);
#endif
    if (ierr) SETERRQ1(PETSC_ERR_LIB,"Error in LAPACK routine %d",ierr);
    ierr = PetscFree(work);CHKERRQ(ierr);
    ierr = PetscFree(rwork);CHKERRQ(ierr);
    ierr = PetscMalloc(n*sizeof(int),&perm);CHKERRQ(ierr);
    for (i=0; i<n; i++) {perm[i] = i;}
    for (i=0; i<n; i++) {r[i] = PetscRealPart(eigs[i]);}
    ierr = PetscSortRealWithPermutation(n,r,perm);CHKERRQ(ierr);
    for (i=0; i<n; i++) {
      r[i] = PetscRealPart(eigs[perm[i]]);
      c[i] = PetscImaginaryPart(eigs[perm[i]]);
    }
    ierr = PetscFree(perm);CHKERRQ(ierr);
    ierr = PetscFree(eigs);CHKERRQ(ierr);
  }
#endif
  if (size > 1) {
    ierr = MatRestoreArray(A,&array);CHKERRQ(ierr);
    ierr = MatDestroy(A);CHKERRQ(ierr);
  } else {
    ierr = MatRestoreArray(BA,&array);CHKERRQ(ierr);
  }
  ierr = MatDestroy(BA);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
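
/*
   Usage sketch (illustration only, not part of this file): all n eigenvalues of
   the n x n preconditioned operator are computed and sorted by real part, so the
   arrays passed in must each hold at least n entries. "ksp", "n" (the global
   system size), "i", and "ierr" are assumed to exist in the caller.

      PetscReal *re,*im;
      ierr = PetscMalloc(n*sizeof(PetscReal),&re);CHKERRQ(ierr);
      ierr = PetscMalloc(n*sizeof(PetscReal),&im);CHKERRQ(ierr);
      ierr = KSPComputeEigenvaluesExplicitly(ksp,n,re,im);CHKERRQ(ierr);
      for (i=0; i<n; i++) {
        ierr = PetscPrintf(PETSC_COMM_WORLD,"%g + %g i\n",re[i],im[i]);CHKERRQ(ierr);
      }
      ierr = PetscFree(re);CHKERRQ(ierr);
      ierr = PetscFree(im);CHKERRQ(ierr);
*/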