Actual source code: da2.c

  1: /*$Id: da2.c,v 1.180 2001/09/07 20:12:17 bsmith Exp $*/
  2: 
 3:  #include "src/dm/da/daimpl.h"

  5: int DAGetOwnershipRange(DA da,int **lx,int **ly,int **lz)
  6: {
  9:   if (lx) *lx = da->lx;
 10:   if (ly) *ly = da->ly;
 11:   if (lz) *lz = da->lz;
 12:   return(0);
 13: }

 15: int DAView_2d(DA da,PetscViewer viewer)
 16: {
 17:   int        rank,ierr;
 18:   PetscTruth isascii,isdraw,isbinary;

 21:   MPI_Comm_rank(da->comm,&rank);

 23:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
 24:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
 25:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
 26:   if (isascii) {
 27:     PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %d N %d m %d n %d w %d s %d\n",rank,da->M,
 28:                              da->N,da->m,da->n,da->w,da->s);
 29:     PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %d %d, Y range of indices: %d %d\n",da->xs,da->xe,da->ys,da->ye);
 30:     PetscViewerFlush(viewer);
 31:   } else if (isdraw) {
 32:     PetscDraw       draw;
 33:     double     ymin = -1*da->s-1,ymax = da->N+da->s;
 34:     double     xmin = -1*da->s-1,xmax = da->M+da->s;
 35:     double     x,y;
 36:     int        base,*idx;
 37:     char       node[10];
 38:     PetscTruth isnull;
 39: 
 40:     PetscViewerDrawGetDraw(viewer,0,&draw);
 41:     PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
 42:     PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);
 43:     PetscDrawSynchronizedClear(draw);

 45:     /* first processor draws all node lines */
 46:     if (!rank) {
 47:       ymin = 0.0; ymax = da->N - 1;
 48:       for (xmin=0; xmin<da->M; xmin++) {
 49:         PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_BLACK);
 50:       }
 51:       xmin = 0.0; xmax = da->M - 1;
 52:       for (ymin=0; ymin<da->N; ymin++) {
 53:         PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);
 54:       }
 55:     }
 56:     PetscDrawSynchronizedFlush(draw);
 57:     PetscDrawPause(draw);

 59:     /* draw my box */
 60:     ymin = da->ys; ymax = da->ye - 1; xmin = da->xs/da->w;
 61:     xmax =(da->xe-1)/da->w;
 62:     PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);
 63:     PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);
 64:     PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);
 65:     PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);

 67:     /* put in numbers */
 68:     base = (da->base)/da->w;
 69:     for (y=ymin; y<=ymax; y++) {
 70:       for (x=xmin; x<=xmax; x++) {
 71:         sprintf(node,"%d",base++);
 72:         PetscDrawString(draw,x,y,PETSC_DRAW_BLACK,node);
 73:       }
 74:     }

 76:     PetscDrawSynchronizedFlush(draw);
 77:     PetscDrawPause(draw);
 78:     /* overlay ghost numbers, useful for error checking */
 79:     /* put in numbers */

 81:     base = 0; idx = da->idx;
 82:     ymin = da->Ys; ymax = da->Ye; xmin = da->Xs; xmax = da->Xe;
 83:     for (y=ymin; y<ymax; y++) {
 84:       for (x=xmin; x<xmax; x++) {
 85:         if ((base % da->w) == 0) {
 86:           sprintf(node,"%d",idx[base]/da->w);
 87:           PetscDrawString(draw,x/da->w,y,PETSC_DRAW_BLUE,node);
 88:         }
 89:         base++;
 90:       }
 91:     }
 92:     PetscDrawSynchronizedFlush(draw);
 93:     PetscDrawPause(draw);
 94:   } else if (isbinary) {
 95:     DAView_Binary(da,viewer);
 96:   } else {
 97:     SETERRQ1(1,"Viewer type %s not supported for DA2d",((PetscObject)viewer)->type_name);
 98:   }
 99:   return(0);
100: }

102: #if defined(PETSC_HAVE_AMS)
103: /*
104:       This function tells the AMS the layout of the vectors; it is called
105:    in the VecPublish_xx routines.
106: */
107: EXTERN_C_BEGIN
108: int AMSSetFieldBlock_DA(AMS_Memory amem,char *name,Vec vec)
109: {
110:   int        ierr,dof,dim,ends[4],shift = 0,starts[] = {0,0,0,0};
111:   DA         da = 0;
112:   PetscTruth isseq,ismpi;

115:   if (((PetscObject)vec)->amem < 0) return(0); /* return if not published */

117:   PetscObjectQuery((PetscObject)vec,"DA",(PetscObject*)&da);
118:   if (!da) return(0);
119:   DAGetInfo(da,&dim,0,0,0,0,0,0,&dof,0,0,0);
120:   if (dof > 1) {dim++; shift = 1; ends[0] = dof;}

122:   PetscTypeCompare((PetscObject)vec,VECSEQ,&isseq);
123:   PetscTypeCompare((PetscObject)vec,VECMPI,&ismpi);
124:   if (isseq) {
125:     DAGetGhostCorners(da,0,0,0,ends+shift,ends+shift+1,ends+shift+2);
126:     ends[shift]   += starts[shift]-1;
127:     ends[shift+1] += starts[shift+1]-1;
128:     ends[shift+2] += starts[shift+2]-1;
129:     ierr = AMS_Memory_set_field_block(amem,name,dim,starts,ends);
130:     if (ierr) {
131:       char *message;
132:       AMS_Explain_error(ierr,&message);
133:       SETERRQ(ierr,message);
134:     }
135:   } else if (ismpi) {
136:     DAGetCorners(da,starts+shift,starts+shift+1,starts+shift+2,
137:                            ends+shift,ends+shift+1,ends+shift+2);
138:     ends[shift]   += starts[shift]-1;
139:     ends[shift+1] += starts[shift+1]-1;
140:     ends[shift+2] += starts[shift+2]-1;
141:     ierr = AMS_Memory_set_field_block(amem,name,dim,starts,ends);
142:     if (ierr) {
143:       char *message;
144:       AMS_Explain_error(ierr,&message);
145:       SETERRQ(ierr,message);
146:     }
147:   } else {
148:     SETERRQ1(1,"Wrong vector type %s for this call",((PetscObject)vec)->type_name);
149:   }

151:   return(0);
152: }
153: EXTERN_C_END
154: #endif

156: int DAPublish_Petsc(PetscObject obj)
157: {
158: #if defined(PETSC_HAVE_AMS)
159:   DA          v = (DA) obj;
160:   int         ierr;
161: #endif


165: #if defined(PETSC_HAVE_AMS)
166:   /* if it is already published then return */
167:   if (v->amem >=0) return(0);

169:   PetscObjectPublishBaseBegin(obj);
170:   PetscObjectPublishBaseEnd(obj);
171: #endif

173:   return(0);
174: }

176: /*
177:    This allows the DA vectors to properly tell Matlab their dimensions
178: */
179: #if defined(PETSC_HAVE_MATLAB_ENGINE) && !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_SINGLE)
180: #include "engine.h"   /* Matlab include file */
181: #include "mex.h"      /* Matlab include file */
182: EXTERN_C_BEGIN
183: int VecMatlabEnginePut_DA2d(PetscObject obj,void *engine)
184: {
185:   int     ierr,n,m;
186:   Vec     vec = (Vec)obj;
187:   PetscScalar  *array;
188:   mxArray *mat;
189:   DA      da;

192:   PetscObjectQuery((PetscObject)vec,"DA",(PetscObject*)&da);
193:   if (!da) SETERRQ(1,"Vector not associated with a DA");
194:   DAGetGhostCorners(da,0,0,0,&m,&n,0);

196:   VecGetArray(vec,&array);
197: #if !defined(PETSC_USE_COMPLEX)
198:   mat  = mxCreateDoubleMatrix(m,n,mxREAL);
199: #else
200:   mat  = mxCreateDoubleMatrix(m,n,mxCOMPLEX);
201: #endif
202:   PetscMemcpy(mxGetPr(mat),array,n*m*sizeof(PetscScalar));
203:   PetscObjectName(obj);
204:   mxSetName(mat,obj->name);
205:   engPutArray((Engine *)engine,mat);
206: 
207:   VecRestoreArray(vec,&array);
208:   return(0);
209: }
210: EXTERN_C_END
211: #endif

213: /*@C
214:    DACreate2d -  Creates an object that will manage the communication of  two-dimensional 
215:    regular array data that is distributed across some processors.

217:    Collective on MPI_Comm

219:    Input Parameters:
220: +  comm - MPI communicator
221: .  wrap - type of periodicity the array should have.
222:          Use one of DA_NONPERIODIC, DA_XPERIODIC, DA_YPERIODIC, or DA_XYPERIODIC.
223: .  stencil_type - stencil type.  Use either DA_STENCIL_BOX or DA_STENCIL_STAR.
224: .  M,N - global dimension in each direction of the array
225: .  m,n - corresponding number of processors in each dimension 
226:          (or PETSC_DECIDE to have calculated)
227: .  dof - number of degrees of freedom per node
228: .  s - stencil width
229: -  lx, ly - arrays containing the number of nodes in each cell along
230:            the x and y coordinates, or PETSC_NULL. If non-null, these
231:            must be of length m and n respectively, and the corresponding
232:            m and n cannot be PETSC_DECIDE. The sum of the lx[] entries
233:            must be M, and the sum of the ly[] entries must be N.

235:    Output Parameter:
236: .  inra - the resulting distributed array object

238:    Options Database Keys:
239: +  -da_view - Calls DAView() at the conclusion of DACreate2d()
240: .  -da_grid_x <nx> - number of grid points in x direction, if M < 0
241: .  -da_grid_y <ny> - number of grid points in y direction, if N < 0
242: .  -da_processors_x <nx> - number of processors in x direction
243: -  -da_processors_y <ny> - number of processors in y direction

245:    Level: beginner

247:    Notes:
248:    The stencil type DA_STENCIL_STAR with width 1 corresponds to the 
249:    standard 5-pt stencil, while DA_STENCIL_BOX with width 1 denotes
250:    the standard 9-pt stencil.

252:    The array data itself is NOT stored in the DA; it is stored in Vec objects.
253:    The appropriate vector objects can be obtained with calls to DACreateGlobalVector()
254:    and DACreateLocalVector(), and calls to VecDuplicate() if more are needed.

256: .keywords: distributed array, create, two-dimensional

258: .seealso: DADestroy(), DAView(), DACreate1d(), DACreate3d(), DAGlobalToLocalBegin(),
259:           DAGlobalToLocalEnd(), DALocalToGlobal(), DALocalToLocalBegin(), DALocalToLocalEnd(),
260:           DAGetInfo(), DACreateGlobalVector(), DACreateLocalVector(), DACreateNaturalVector(), DALoad(), DAView()

262: @*/
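/*
   Illustrative usage sketch (editorial addition, not part of the original source):
   a minimal call sequence for DACreate2d() that lets PETSc pick the processor
   decomposition.  The 64x64 grid, stencil width 1, and dof 1 are arbitrary values
   chosen for the example.

     DA  da;
     Vec g;
     int ierr;

     ierr = DACreate2d(PETSC_COMM_WORLD,DA_NONPERIODIC,DA_STENCIL_STAR,64,64,
                       PETSC_DECIDE,PETSC_DECIDE,1,1,PETSC_NULL,PETSC_NULL,&da);CHKERRQ(ierr);
     ierr = DACreateGlobalVector(da,&g);CHKERRQ(ierr);
     ...  work with the vector ...
     ierr = VecDestroy(g);CHKERRQ(ierr);
     ierr = DADestroy(da);CHKERRQ(ierr);
*/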
263: int DACreate2d(MPI_Comm comm,DAPeriodicType wrap,DAStencilType stencil_type,
264:                 int M,int N,int m,int n,int dof,int s,int *lx,int *ly,DA *inra)
265: {
266:   int           rank,size,xs,xe,ys,ye,x,y,Xs,Xe,Ys,Ye,ierr,start,end;
267:   int           up,down,left,i,n0,n1,n2,n3,n5,n6,n7,n8,*idx,nn;
268:   int           xbase,*bases,*ldims,j,x_t,y_t,s_t,base,count;
269:   int           s_x,s_y; /* s proportionalized to w */
270:   int           *flx = 0,*fly = 0;
271:   int           sn0 = 0,sn2 = 0,sn6 = 0,sn8 = 0,refine_x = 2, refine_y = 2,tM = M,tN = N;
272:   PetscTruth    flg1,flg2;
273:   DA            da;
274:   Vec           local,global;
275:   VecScatter    ltog,gtol;
276:   IS            to,from;

280:   *inra = 0;
281: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
282:   DMInitializePackage(PETSC_NULL);
283: #endif

285:   if (dof < 1) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Must have 1 or more degrees of freedom per node: %d",dof);
286:   if (s < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Stencil width cannot be negative: %d",s);

288:   PetscOptionsBegin(comm,PETSC_NULL,"2d DA Options","DA");
289:     if (M < 0){
290:       tM = -M;
291:       PetscOptionsInt("-da_grid_x","Number of grid points in x direction","DACreate2d",tM,&tM,PETSC_NULL);
292:     }
293:     if (N < 0){
294:       tN = -N;
295:       PetscOptionsInt("-da_grid_y","Number of grid points in y direction","DACreate2d",tN,&tN,PETSC_NULL);
296:     }
297:     PetscOptionsInt("-da_processors_x","Number of processors in x direction","DACreate2d",m,&m,PETSC_NULL);
298:     PetscOptionsInt("-da_processors_y","Number of processors in y direction","DACreate2d",n,&n,PETSC_NULL);
299:     PetscOptionsInt("-da_refine_x","Refinement ratio in x direction","DACreate2d",refine_x,&refine_x,PETSC_NULL);
300:     PetscOptionsInt("-da_refine_y","Refinement ratio in y direction","DACreate2d",refine_y,&refine_y,PETSC_NULL);
301:   PetscOptionsEnd();
302:   M = tM; N = tN;

304:   PetscHeaderCreate(da,_p_DA,struct _DAOps,DA_COOKIE,0,"DA",comm,DADestroy,DAView);
305:   PetscLogObjectCreate(da);
306:   da->bops->publish           = DAPublish_Petsc;
307:   da->ops->createglobalvector = DACreateGlobalVector;
308:   da->ops->getinterpolation   = DAGetInterpolation;
309:   da->ops->getcoloring        = DAGetColoring;
310:   da->ops->getmatrix          = DAGetMatrix;
311:   da->ops->refine             = DARefine;
312:   PetscLogObjectMemory(da,sizeof(struct _p_DA));
313:   da->dim        = 2;
314:   da->interptype = DA_Q1;
315:   da->refine_x   = refine_x;
316:   da->refine_y   = refine_y;
317:   PetscMalloc(dof*sizeof(char*),&da->fieldname);
318:   PetscMemzero(da->fieldname,dof*sizeof(char*));

320:   MPI_Comm_size(comm,&size);
321:   MPI_Comm_rank(comm,&rank);

323:   if (m != PETSC_DECIDE) {
324:     if (m < 1) {SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Non-positive number of processors in X direction: %d",m);}
325:     else if (m > size) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Too many processors in X direction: %d %d",m,size);}
326:   }
327:   if (n != PETSC_DECIDE) {
328:     if (n < 1) {SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Non-positive number of processors in Y direction: %d",n);}
329:     else if (n > size) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Too many processors in Y direction: %d %d",n,size);}
330:   }

332:   if (m == PETSC_DECIDE || n == PETSC_DECIDE) {
333:     /* try for squarish distribution */
334:     /* This should use MPI_Dims_create instead */
335:     m = (int)(0.5 + sqrt(((double)M)*((double)size)/((double)N)));
336:     if (!m) m = 1;
337:     while (m > 0) {
338:       n = size/m;
339:       if (m*n == size) break;
340:       m--;
341:     }
342:     if (M > N && m < n) {int _m = m; m = n; n = _m;}
343:     if (m*n != size) SETERRQ(PETSC_ERR_PLIB,"Internally Created Bad Partition");
344:   } else if (m*n != size) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Given Bad partition");

346:   if (M < m) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Partition in x direction is too fine! %d %d",M,m);
347:   if (N < n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Partition in y direction is too fine! %d %d",N,n);

349:   /*
350:      We should create an MPI Cartesian topology here, with reorder
351:      set to true.  That would create a NEW communicator that we would
352:      need to use for operations on this distributed array 
353:   */
354:   PetscOptionsHasName(PETSC_NULL,"-da_partition_nodes_at_end",&flg2);

356:   /* 
357:      Determine locally owned region 
358:      xs is the first local node number, x is the number of local nodes 
359:   */
360:   if (lx) { /* user sets distribution */
361:     x  = lx[rank % m];
362:     xs = 0;
363:     for (i=0; i<(rank % m); i++) {
364:       xs += lx[i];
365:     }
366:     left = xs;
367:     for (i=(rank % m); i<m; i++) {
368:       left += lx[i];
369:     }
370:     if (left != M) {
371:       SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Sum of lx across processors not equal to M: %d %d",left,M);
372:     }
373:   } else if (flg2) {
374:     x = (M + rank%m)/m;
375:     if (x < s) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column width is too thin for stencil! %d %d",x,s);
376:     if (M/m == x) { xs = (rank % m)*x; }
377:     else          { xs = (rank % m)*(x-1) + (M+(rank % m))%(x*m); }
378:     SETERRQ(PETSC_ERR_SUP,"-da_partition_nodes_at_end not supported");
379:   } else { /* Normal PETSc distribution */
380:     x = M/m + ((M % m) > (rank % m));
381:     if (x < s) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column width is too thin for stencil! %d %d",x,s);
382:     if ((M % m) > (rank % m)) { xs = (rank % m)*x; }
383:     else                      { xs = (M % m)*(x+1) + ((rank % m)-(M % m))*x; }
384:     PetscMalloc(m*sizeof(int),&lx);
385:     flx = lx;
386:     for (i=0; i<m; i++) {
387:       lx[i] = M/m + ((M % m) > i);
388:     }
389:   }

391:   /* 
392:      Determine locally owned region 
393:      ys is the first local node number, y is the number of local nodes 
394:   */
395:   if (ly) { /* user sets distribution */
396:     y  = ly[rank/m];
397:     ys = 0;
398:     for (i=0; i<(rank/m); i++) {
399:       ys += ly[i];
400:     }
401:     left = ys;
402:     for (i=(rank/m); i<n; i++) {
403:       left += ly[i];
404:     }
405:     if (left != N) {
406:       SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Sum of ly across processors not equal to N: %d %d",left,N);
407:     }
408:   } else if (flg2) {
409:     y = (N + rank/m)/n;
410:     if (y < s) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row width is too thin for stencil! %d %d",y,s);
411:     if (N/n == y) { ys = (rank/m)*y;  }
412:     else          { ys = (rank/m)*(y-1) + (N+(rank/m))%(y*n); }
413:     SETERRQ(PETSC_ERR_SUP,"-da_partition_nodes_at_end not supported");
414:   } else { /* Normal PETSc distribution */
415:     y = N/n + ((N % n) > (rank/m));
416:     if (y < s) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row width is too thin for stencil! %d %d",y,s);
417:     if ((N % n) > (rank/m)) { ys = (rank/m)*y; }
418:     else                    { ys = (N % n)*(y+1) + ((rank/m)-(N % n))*y; }
419:     PetscMalloc(n*sizeof(int),&ly);
420:     fly  = ly;
421:     for (i=0; i<n; i++) {
422:       ly[i] = N/n + ((N % n) > i);
423:     }
424:   }

426:   xe = xs + x;
427:   ye = ys + y;

429:   /* determine ghost region */
430:   /* Assume No Periodicity */
431:   if (xs-s > 0) Xs = xs - s; else Xs = 0;
432:   if (ys-s > 0) Ys = ys - s; else Ys = 0;
433:   if (xe+s <= M) Xe = xe + s; else Xe = M;
434:   if (ye+s <= N) Ye = ye + s; else Ye = N;

436:   /* X Periodic */
437:   if (DAXPeriodic(wrap)){
438:     Xs = xs - s;
439:     Xe = xe + s;
440:   }

442:   /* Y Periodic */
443:   if (DAYPeriodic(wrap)){
444:     Ys = ys - s;
445:     Ye = ye + s;
446:   }

448:   /* Resize all X parameters to reflect w */
449:   x   *= dof;
450:   xs  *= dof;
451:   xe  *= dof;
452:   Xs  *= dof;
453:   Xe  *= dof;
454:   s_x = s*dof;
455:   s_y = s;

457:   /* determine starting point of each processor */
458:   nn = x*y;
459:   PetscMalloc((2*size+1)*sizeof(int),&bases);
460:   ldims = (int*)(bases+size+1);
461:   MPI_Allgather(&nn,1,MPI_INT,ldims,1,MPI_INT,comm);
462:   bases[0] = 0;
463:   for (i=1; i<=size; i++) {
464:     bases[i] = ldims[i-1];
465:   }
466:   for (i=1; i<=size; i++) {
467:     bases[i] += bases[i-1];
468:   }

470:   /* allocate the base parallel and sequential vectors */
471:   VecCreateMPI(comm,x*y,PETSC_DECIDE,&global);
472:   VecSetBlockSize(global,dof);
473:   VecCreateSeq(PETSC_COMM_SELF,(Xe-Xs)*(Ye-Ys),&local);
474:   VecSetBlockSize(local,dof);


477:   /* generate appropriate vector scatters */
478:   /* local to global inserts non-ghost point region into global */
479:   VecGetOwnershipRange(global,&start,&end);
480:   ISCreateStride(comm,x*y,start,1,&to);

482:   left  = xs - Xs; down  = ys - Ys; up    = down + y;
483:   PetscMalloc(x*(up - down)*sizeof(int),&idx);
484:   count = 0;
485:   for (i=down; i<up; i++) {
486:     for (j=0; j<x; j++) {
487:       idx[count++] = left + i*(Xe-Xs) + j;
488:     }
489:   }
490:   ISCreateGeneral(comm,count,idx,&from);
491:   PetscFree(idx);

493:   VecScatterCreate(local,from,global,to,&ltog);
494:   PetscLogObjectParent(da,to);
495:   PetscLogObjectParent(da,from);
496:   PetscLogObjectParent(da,ltog);
497:   ISDestroy(from);
498:   ISDestroy(to);

500:   /* global to local must include ghost points */
501:   if (stencil_type == DA_STENCIL_BOX) {
502:     ISCreateStride(comm,(Xe-Xs)*(Ye-Ys),0,1,&to);
503:   } else {
504:     /* must drop into cross shape region */
505:     /*       ---------|
506:             |  top    |
507:          |---         ---|
508:          |   middle      |
509:          |               |
510:          ----         ----
511:             | bottom  |
512:             -----------
513:         Xs xs        xe  Xe */
514:     /* bottom */
515:     left  = xs - Xs; down = ys - Ys; up    = down + y;
516:     count = down*(xe-xs) + (up-down)*(Xe-Xs) + (Ye-Ys-up)*(xe-xs);
517:     ierr  = PetscMalloc(count*sizeof(int),&idx);
518:     count = 0;
519:     for (i=0; i<down; i++) {
520:       for (j=0; j<xe-xs; j++) {
521:         idx[count++] = left + i*(Xe-Xs) + j;
522:       }
523:     }
524:     /* middle */
525:     for (i=down; i<up; i++) {
526:       for (j=0; j<Xe-Xs; j++) {
527:         idx[count++] = i*(Xe-Xs) + j;
528:       }
529:     }
530:     /* top */
531:     for (i=up; i<Ye-Ys; i++) {
532:       for (j=0; j<xe-xs; j++) {
533:         idx[count++] = left + i*(Xe-Xs) + j;
534:       }
535:     }
536:     ISCreateGeneral(comm,count,idx,&to);
537:     PetscFree(idx);
538:   }


541:   /* determine who lies on each side of us stored in    n6 n7 n8
542:                                                         n3    n5
543:                                                         n0 n1 n2
544:   */

546:   /* Assume the Non-Periodic Case */
547:   n1 = rank - m;
548:   if (rank % m) {
549:     n0 = n1 - 1;
550:   } else {
551:     n0 = -1;
552:   }
553:   if ((rank+1) % m) {
554:     n2 = n1 + 1;
555:     n5 = rank + 1;
556:     n8 = rank + m + 1; if (n8 >= m*n) n8 = -1;
557:   } else {
558:     n2 = -1; n5 = -1; n8 = -1;
559:   }
560:   if (rank % m) {
561:     n3 = rank - 1;
562:     n6 = n3 + m; if (n6 >= m*n) n6 = -1;
563:   } else {
564:     n3 = -1; n6 = -1;
565:   }
566:   n7 = rank + m; if (n7 >= m*n) n7 = -1;


569:   /* Modify for Periodic Cases */
570:   if (wrap == DA_YPERIODIC) {  /* Handle Top and Bottom Sides */
571:     if (n1 < 0) n1 = rank + m * (n-1);
572:     if (n7 < 0) n7 = rank - m * (n-1);
573:     if ((n3 >= 0) && (n0 < 0)) n0 = size - m + rank - 1;
574:     if ((n3 >= 0) && (n6 < 0)) n6 = (rank%m)-1;
575:     if ((n5 >= 0) && (n2 < 0)) n2 = size - m + rank + 1;
576:     if ((n5 >= 0) && (n8 < 0)) n8 = (rank%m)+1;
577:   } else if (wrap == DA_XPERIODIC) { /* Handle Left and Right Sides */
578:     if (n3 < 0) n3 = rank + (m-1);
579:     if (n5 < 0) n5 = rank - (m-1);
580:     if ((n1 >= 0) && (n0 < 0)) n0 = rank-1;
581:     if ((n1 >= 0) && (n2 < 0)) n2 = rank-2*m+1;
582:     if ((n7 >= 0) && (n6 < 0)) n6 = rank+2*m-1;
583:     if ((n7 >= 0) && (n8 < 0)) n8 = rank+1;
584:   } else if (wrap == DA_XYPERIODIC) {

586:     /* Handle all four corners */
587:     if ((n6 < 0) && (n7 < 0) && (n3 < 0)) n6 = m-1;
588:     if ((n8 < 0) && (n7 < 0) && (n5 < 0)) n8 = 0;
589:     if ((n2 < 0) && (n5 < 0) && (n1 < 0)) n2 = size-m;
590:     if ((n0 < 0) && (n3 < 0) && (n1 < 0)) n0 = size-1;

592:     /* Handle Top and Bottom Sides */
593:     if (n1 < 0) n1 = rank + m * (n-1);
594:     if (n7 < 0) n7 = rank - m * (n-1);
595:     if ((n3 >= 0) && (n0 < 0)) n0 = size - m + rank - 1;
596:     if ((n3 >= 0) && (n6 < 0)) n6 = (rank%m)-1;
597:     if ((n5 >= 0) && (n2 < 0)) n2 = size - m + rank + 1;
598:     if ((n5 >= 0) && (n8 < 0)) n8 = (rank%m)+1;

600:     /* Handle Left and Right Sides */
601:     if (n3 < 0) n3 = rank + (m-1);
602:     if (n5 < 0) n5 = rank - (m-1);
603:     if ((n1 >= 0) && (n0 < 0)) n0 = rank-1;
604:     if ((n1 >= 0) && (n2 < 0)) n2 = rank-2*m+1;
605:     if ((n7 >= 0) && (n6 < 0)) n6 = rank+2*m-1;
606:     if ((n7 >= 0) && (n8 < 0)) n8 = rank+1;
607:   }

609:   if (stencil_type == DA_STENCIL_STAR) {
610:     /* save corner processor numbers */
611:     sn0 = n0; sn2 = n2; sn6 = n6; sn8 = n8;
612:     n0 = n2 = n6 = n8 = -1;
613:   }

615:   PetscMalloc((x+2*s_x)*(y+2*s_y)*sizeof(int),&idx);
616:   PetscLogObjectMemory(da,(x+2*s_x)*(y+2*s_y)*sizeof(int));
617:   nn = 0;

619:   xbase = bases[rank];
620:   for (i=1; i<=s_y; i++) {
621:     if (n0 >= 0) { /* left below */
622:       x_t = lx[n0 % m]*dof;
623:       y_t = ly[(n0/m)];
624:       s_t = bases[n0] + x_t*y_t - (s_y-i)*x_t - s_x;
625:       for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
626:     }
627:     if (n1 >= 0) { /* directly below */
628:       x_t = x;
629:       y_t = ly[(n1/m)];
630:       s_t = bases[n1] + x_t*y_t - (s_y+1-i)*x_t;
631:       for (j=0; j<x_t; j++) { idx[nn++] = s_t++;}
632:     }
633:     if (n2 >= 0) { /* right below */
634:       x_t = lx[n2 % m]*dof;
635:       y_t = ly[(n2/m)];
636:       s_t = bases[n2] + x_t*y_t - (s_y+1-i)*x_t;
637:       for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
638:     }
639:   }

641:   for (i=0; i<y; i++) {
642:     if (n3 >= 0) { /* directly left */
643:       x_t = lx[n3 % m]*dof;
644:       /* y_t = y; */
645:       s_t = bases[n3] + (i+1)*x_t - s_x;
646:       for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
647:     }

649:     for (j=0; j<x; j++) { idx[nn++] = xbase++; } /* interior */

651:     if (n5 >= 0) { /* directly right */
652:       x_t = lx[n5 % m]*dof;
653:       /* y_t = y; */
654:       s_t = bases[n5] + (i)*x_t;
655:       for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
656:     }
657:   }

659:   for (i=1; i<=s_y; i++) {
660:     if (n6 >= 0) { /* left above */
661:       x_t = lx[n6 % m]*dof;
662:       /* y_t = ly[(n6/m)]; */
663:       s_t = bases[n6] + (i)*x_t - s_x;
664:       for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
665:     }
666:     if (n7 >= 0) { /* directly above */
667:       x_t = x;
668:       /* y_t = ly[(n7/m)]; */
669:       s_t = bases[n7] + (i-1)*x_t;
670:       for (j=0; j<x_t; j++) { idx[nn++] = s_t++;}
671:     }
672:     if (n8 >= 0) { /* right above */
673:       x_t = lx[n8 % m]*dof;
674:       /* y_t = ly[(n8/m)]; */
675:       s_t = bases[n8] + (i-1)*x_t;
676:       for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
677:     }
678:   }

680:   base = bases[rank];
681:   ISCreateGeneral(comm,nn,idx,&from);
682:   VecScatterCreate(global,from,local,to,&gtol);
683:   PetscLogObjectParent(da,to);
684:   PetscLogObjectParent(da,from);
685:   PetscLogObjectParent(da,gtol);
686:   ISDestroy(to);
687:   ISDestroy(from);

689:   if (stencil_type == DA_STENCIL_STAR) {
690:     /*
691:         Recompute the local to global mappings, this time keeping the 
692:       information about the cross corner processor numbers.
693:     */
694:     n0 = sn0; n2 = sn2; n6 = sn6; n8 = sn8;
695:     nn = 0;
696:     xbase = bases[rank];
697:     for (i=1; i<=s_y; i++) {
698:       if (n0 >= 0) { /* left below */
699:         x_t = lx[n0 % m]*dof;
700:         y_t = ly[(n0/m)];
701:         s_t = bases[n0] + x_t*y_t - (s_y-i)*x_t - s_x;
702:         for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
703:       }
704:       if (n1 >= 0) { /* directly below */
705:         x_t = x;
706:         y_t = ly[(n1/m)];
707:         s_t = bases[n1] + x_t*y_t - (s_y+1-i)*x_t;
708:         for (j=0; j<x_t; j++) { idx[nn++] = s_t++;}
709:       }
710:       if (n2 >= 0) { /* right below */
711:         x_t = lx[n2 % m]*dof;
712:         y_t = ly[(n2/m)];
713:         s_t = bases[n2] + x_t*y_t - (s_y+1-i)*x_t;
714:         for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
715:       }
716:     }

718:     for (i=0; i<y; i++) {
719:       if (n3 >= 0) { /* directly left */
720:         x_t = lx[n3 % m]*dof;
721:         /* y_t = y; */
722:         s_t = bases[n3] + (i+1)*x_t - s_x;
723:         for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
724:       }

726:       for (j=0; j<x; j++) { idx[nn++] = xbase++; } /* interior */

728:       if (n5 >= 0) { /* directly right */
729:         x_t = lx[n5 % m]*dof;
730:         /* y_t = y; */
731:         s_t = bases[n5] + (i)*x_t;
732:         for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
733:       }
734:     }

736:     for (i=1; i<=s_y; i++) {
737:       if (n6 >= 0) { /* left above */
738:         x_t = lx[n6 % m]*dof;
739:         /* y_t = ly[(n6/m)]; */
740:         s_t = bases[n6] + (i)*x_t - s_x;
741:         for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
742:       }
743:       if (n7 >= 0) { /* directly above */
744:         x_t = x;
745:         /* y_t = ly[(n7/m)]; */
746:         s_t = bases[n7] + (i-1)*x_t;
747:         for (j=0; j<x_t; j++) { idx[nn++] = s_t++;}
748:       }
749:       if (n8 >= 0) { /* right above */
750:         x_t = lx[n8 % m]*dof;
751:         /* y_t = ly[(n8/m)]; */
752:         s_t = bases[n8] + (i-1)*x_t;
753:         for (j=0; j<s_x; j++) { idx[nn++] = s_t++;}
754:       }
755:     }
756:   }
757:   PetscFree(bases);

759:   da->M  = M;  da->N  = N;  da->m  = m;  da->n  = n;  da->w = dof;  da->s = s;
760:   da->xs = xs; da->xe = xe; da->ys = ys; da->ye = ye; da->zs = 0; da->ze = 1;
761:   da->Xs = Xs; da->Xe = Xe; da->Ys = Ys; da->Ye = Ye; da->Zs = 0; da->Ze = 1;
762:   da->P  = 1;  da->p  = 1;

764:   PetscLogObjectParent(da,global);
765:   PetscLogObjectParent(da,local);

767:   da->global       = global;
768:   da->local        = local;
769:   da->gtol         = gtol;
770:   da->ltog         = ltog;
771:   da->idx          = idx;
772:   da->Nl           = nn;
773:   da->base         = base;
774:   da->wrap         = wrap;
775:   da->ops->view    = DAView_2d;
776:   da->stencil_type = stencil_type;

778:   /* 
779:      Set the local to global ordering in the global vector, this allows use
780:      of VecSetValuesLocal().
781:   */
782:   ISLocalToGlobalMappingCreateNC(comm,nn,idx,&da->ltogmap);
783:   VecSetLocalToGlobalMapping(da->global,da->ltogmap);
784:   ISLocalToGlobalMappingBlock(da->ltogmap,da->w,&da->ltogmapb);
785:   VecSetLocalToGlobalMappingBlock(da->global,da->ltogmapb);
786:   PetscLogObjectParent(da,da->ltogmap);

788:   *inra = da;

790:   da->ltol = PETSC_NULL;
791:   da->ao   = PETSC_NULL;


794:   if (!flx) {
795:     PetscMalloc(m*sizeof(int),&flx);
796:     PetscMemcpy(flx,lx,m*sizeof(int));
797:   }
798:   if (!fly) {
799:     PetscMalloc(n*sizeof(int),&fly);
800:     PetscMemcpy(fly,ly,n*sizeof(int));
801:   }
802:   da->lx = flx;
803:   da->ly = fly;

805:   PetscOptionsHasName(PETSC_NULL,"-da_view",&flg1);
806:   if (flg1) {DAView(da,PETSC_VIEWER_STDOUT_(da->comm));}
807:   PetscOptionsHasName(PETSC_NULL,"-da_view_draw",&flg1);
808:   if (flg1) {DAView(da,PETSC_VIEWER_DRAW_(da->comm));}
809:   PetscOptionsHasName(PETSC_NULL,"-help",&flg1);
810:   if (flg1) {DAPrintHelp(da);}

812:   PetscPublishAll(da);
813: #if defined(PETSC_HAVE_AMS)
814:   PetscObjectComposeFunctionDynamic((PetscObject)global,"AMSSetFieldBlock_C",
815:          "AMSSetFieldBlock_DA",AMSSetFieldBlock_DA);
816:   PetscObjectComposeFunctionDynamic((PetscObject)local,"AMSSetFieldBlock_C",
817:          "AMSSetFieldBlock_DA",AMSSetFieldBlock_DA);
818:   if (((PetscObject)global)->amem > -1) {
819:     AMSSetFieldBlock_DA(((PetscObject)global)->amem,"values",global);
820:   }
821: #endif
822: #if defined(PETSC_HAVE_MATLAB_ENGINE) && !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_SINGLE)
823:   if (dof == 1) {
824:     PetscObjectComposeFunctionDynamic((PetscObject)local,"PetscMatlabEnginePut_C","VecMatlabEnginePut_DA2d",VecMatlabEnginePut_DA2d);
825:   }
826: #endif
827:   VecSetOperation(global,VECOP_VIEW,(void(*)(void))VecView_MPI_DA);
828:   VecSetOperation(global,VECOP_LOADINTOVECTOR,(void(*)(void))VecLoadIntoVector_Binary_DA);
829:   return(0);
830: }

832: /*@
833:    DAPrintHelp - Prints command line options for DA.

835:    Collective on DA

837:    Input Parameters:
838: .  da - the distributed array

840:    Level: intermediate

842: .seealso: DACreate1d(), DACreate2d(), DACreate3d()

844: .keywords: DA, help

846: @*/
847: int DAPrintHelp(DA da)
848: {
849:   static PetscTruth called = PETSC_FALSE;
850:   MPI_Comm          comm;
851:   int               ierr;


856:   comm = da->comm;
857:   if (!called) {
858:     (*PetscHelpPrintf)(comm,"General Distributed Array (DA) options:\n");
859:     (*PetscHelpPrintf)(comm,"  -da_view: print DA distribution to screen\n");
860:     (*PetscHelpPrintf)(comm,"  -da_view_draw: display DA in window\n");
861:     called = PETSC_TRUE;
862:   }
863:   return(0);
864: }

866: /*@C
867:    DARefine - Creates a new distributed array that is a refinement of a given
868:    distributed array.

870:    Collective on DA

872:    Input Parameter:
873: +  da - initial distributed array
874: -  comm - communicator to contain the refined DA; must either be the same as the da communicator or include the 
875:           da communicator and be 2, 4, or 8 times larger. Currently ignored

877:    Output Parameter:
878: .  daref - refined distributed array

880:    Level: advanced

882:    Note:
883:    Currently, refinement consists of just doubling the number of grid spaces
884:    in each dimension of the DA.

886: .keywords:  distributed array, refine

888: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy()
889: @*/
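/*
   Worked example (editorial addition, not in the original source): with the default
   refinement ratio refine_x = 2, a non-periodic Q1 DA with M = 33 points in x is
   refined to M = 1 + 2*(33-1) = 65 points, i.e. each grid space is split in two;
   for a periodic (or Q0) DA the code below instead uses M = 2*33 = 66, since the
   wrap-around interval is refined as well.
*/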
890: int DARefine(DA da,MPI_Comm comm,DA *daref)
891: {
892:   int M,N,P,ierr;
893:   DA  da2;


898:   if (DAXPeriodic(da->wrap) || da->interptype == DA_Q0){
899:     M = da->refine_x*da->M;
900:   } else {
901:     M = 1 + da->refine_x*(da->M - 1);
902:   }
903:   if (DAYPeriodic(da->wrap) || da->interptype == DA_Q0){
904:     N = da->refine_y*da->N;
905:   } else {
906:     N = 1 + da->refine_y*(da->N - 1);
907:   }
908:   if (DAZPeriodic(da->wrap) || da->interptype == DA_Q0){
909:     P = da->refine_z*da->P;
910:   } else {
911:     P = 1 + da->refine_z*(da->P - 1);
912:   }
913:   if (da->dim == 1) {
914:     DACreate1d(da->comm,da->wrap,M,da->w,da->s,PETSC_NULL,&da2);
915:   } else if (da->dim == 2) {
916:     DACreate2d(da->comm,da->wrap,da->stencil_type,M,N,da->m,da->n,da->w,da->s,PETSC_NULL,PETSC_NULL,&da2);
917:   } else if (da->dim == 3) {
918:     DACreate3d(da->comm,da->wrap,da->stencil_type,M,N,P,da->m,da->n,da->p,da->w,da->s,0,0,0,&da2);
919:   }
920:   *daref = da2;
921:   return(0);
922: }

924: /*
925:       M is number of grid points 
926:       m is number of processors

928: */
929: int DASplitComm2d(MPI_Comm comm,int M,int N,int sw,MPI_Comm *outcomm)
930: {
931:   int ierr,m,n = 0,csize,size,rank,x = 0,y = 0;

934:   MPI_Comm_size(comm,&size);
935:   MPI_Comm_rank(comm,&rank);

937:   csize = 4*size;
938:   do {
939:     if (csize % 4) SETERRQ4(1,"Cannot split communicator of size %d tried %d %d %d",size,csize,x,y);
940:     csize   = csize/4;
941: 
942:     m = (int)(0.5 + sqrt(((double)M)*((double)csize)/((double)N)));
943:     if (!m) m = 1;
944:     while (m > 0) {
945:       n = csize/m;
946:       if (m*n == csize) break;
947:       m--;
948:     }
949:     if (M > N && m < n) {int _m = m; m = n; n = _m;}

951:     x = M/m + ((M % m) > ((csize-1) % m));
952:     y = (N + (csize-1)/m)/n;
953:   } while ((x < 4 || y < 4) && csize > 1);
954:   if (size != csize) {
955:     MPI_Group entire_group,sub_group;
956:     int       i,*groupies;

958:     ierr     = MPI_Comm_group(comm,&entire_group);
959:     PetscMalloc(csize*sizeof(int),&groupies);
960:     for (i=0; i<csize; i++) {
961:       groupies[i] = (rank/csize)*csize + i;
962:     }
963:     ierr     = MPI_Group_incl(entire_group,csize,groupies,&sub_group);
964:     ierr     = PetscFree(groupies);
965:     ierr     = MPI_Comm_create(comm,sub_group,outcomm);
966:     ierr     = MPI_Group_free(&entire_group);
967:     ierr     = MPI_Group_free(&sub_group);
968:     PetscLogInfo(0,"Creating redundant coarse problems of size %d\n",csize);
969:   } else {
970:     *outcomm = comm;
971:   }
972:   return(0);
973: }
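/*
   Worked example (editorial addition, not in the original source): for size = 16
   processes and M = N = 8, the loop above starts from csize = 4*16 = 64 and divides
   by 4 each pass.  csize = 16 gives m = n = 4 and x = y = 2 (< 4), so it continues;
   csize = 4 gives m = n = 2 and x = y = 4, which is acceptable.  Since csize (4)
   differs from size (16), the communicator is split into four subcommunicators of
   4 processes each, on which redundant coarse problems can be solved.
*/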

975: /*@C
976:        DASetLocalFunction - Caches in a DA a local function

978:    Collective on DA

980:    Input Parameter:
981: +  da - initial distributed array
982: -  lf - the local function

984:    Level: intermediate

986: .keywords:  distributed array, refine

988: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunctioni()
989: @*/
990: int DASetLocalFunction(DA da,DALocalFunction1 lf)
991: {
994:   da->lf    = lf;
995:   return(0);
996: }
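/*
   Illustrative sketch (editorial addition, not part of the original source): a local
   function for a 2d DA with dof = 1, written over the DALocalInfo bounds and then
   registered with DASetLocalFunction().  The identity residual is only a placeholder;
   the cast to DALocalFunction1 matches the untyped (void*) arguments with which
   DAFormFunction1() invokes the callback.

     int MyLocalFunction(DALocalInfo *info,PetscScalar **x,PetscScalar **f,void *ctx)
     {
       int i,j;
       for (j=info->ys; j<info->ys+info->ym; j++) {
         for (i=info->xs; i<info->xs+info->xm; i++) {
           f[j][i] = x[j][i];   /* placeholder residual */
         }
       }
       return 0;
     }

     ierr = DASetLocalFunction(da,(DALocalFunction1)MyLocalFunction);CHKERRQ(ierr);
*/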

998: /*@C
999:        DASetLocalFunctioni - Caches in a DA a local function that evaluates a single component

1001:    Collective on DA

1003:    Input Parameter:
1004: +  da - initial distributed array
1005: -  lfi - the local function

1007:    Level: intermediate

1009: .keywords:  distributed array, refine

1011: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunction()
1012: @*/
1013: int DASetLocalFunctioni(DA da,int (*lfi)(DALocalInfo*,MatStencil*,void*,PetscScalar*,void*))
1014: {
1017:   da->lfi = lfi;
1018:   return(0);
1019: }

1021: /*MC
1022:        DASetLocalAdicFunction - Caches in a DA a local function computed by ADIC/ADIFOR

1024:    Collective on DA

1026:    Synopsis:
1027:    int DASetLocalAdicFunction(DA da,DALocalFunction1 ad_lf)
1028:    
1029:    Input Parameter:
1030: +  da - initial distributed array
1031: -  ad_lf - the local function as computed by ADIC/ADIFOR

1033:    Level: intermediate

1035: .keywords:  distributed array, refine

1037: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunction(),
1038:           DASetLocalJacobian()
1039: M*/

1041: int DASetLocalAdicFunction_Private(DA da,DALocalFunction1 ad_lf)
1042: {
1045:   da->adic_lf = ad_lf;
1046:   return(0);
1047: }

1049: /*MC
1050:        DASetLocalAdicFunctioni - Caches in a DA a local functioni computed by ADIC/ADIFOR

1052:    Collective on DA

1054:    Synopsis:
1055:    int DASetLocalAdicFunctioni(DA da,int (*ad_lfi)(DALocalInfo*,MatStencil*,void*,void*,void*))
1056:    
1057:    Input Parameter:
1058: +  da - initial distributed array
1059: -  ad_lfi - the local function as computed by ADIC/ADIFOR

1061:    Level: intermediate

1063: .keywords:  distributed array, refine

1065: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunction(),
1066:           DASetLocalJacobian(), DASetLocalFunctioni()
1067: M*/

1069: int DASetLocalAdicFunctioni_Private(DA da,int (*ad_lfi)(DALocalInfo*,MatStencil*,void*,void*,void*))
1070: {
1073:   da->adic_lfi = ad_lfi;
1074:   return(0);
1075: }

1077: /*MC
1078:        DASetLocalAdicMFFunctioni - Caches in a DA a matrix-free local functioni computed by ADIC/ADIFOR

1080:    Collective on DA

1082:    Synopsis:
1083:    int DASetLocalAdicMFFunctioni(DA da,int (*admf_lfi)(DALocalInfo*,MatStencil*,void*,void*,void*))
1084:    
1085:    Input Parameter:
1086: +  da - initial distributed array
1087: -  admf_lfi - the local matrix-free function as computed by ADIC/ADIFOR

1089:    Level: intermediate

1091: .keywords:  distributed array, refine

1093: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunction(),
1094:           DASetLocalJacobian(), DASetLocalFunctioni()
1095: M*/

1097: int DASetLocalAdicMFFunctioni_Private(DA da,int (*admf_lfi)(DALocalInfo*,MatStencil*,void*,void*,void*))
1098: {
1101:   da->adicmf_lfi = admf_lfi;
1102:   return(0);
1103: }

1105: /*MC
1106:        DASetLocalAdicMFFunction - Caches in a DA a matrix-free local function computed by ADIC/ADIFOR

1108:    Collective on DA

1110:    Synopsis:
1111:    int DASetLocalAdicMFFunction(DA da,DALocalFunction1 ad_lf)
1112:    
1113:    Input Parameter:
1114: +  da - initial distributed array
1115: -  ad_lf - the local function as computed by ADIC/ADIFOR

1117:    Level: intermediate

1119: .keywords:  distributed array, refine

1121: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunction(),
1122:           DASetLocalJacobian()
1123: M*/

1125: int DASetLocalAdicMFFunction_Private(DA da,DALocalFunction1 ad_lf)
1126: {
1129:   da->adicmf_lf = ad_lf;
1130:   return(0);
1131: }

1133: /*@C
1134:        DASetLocalJacobian - Caches in a DA a local Jacobian

1136:    Collective on DA

1138:    
1139:    Input Parameter:
1140: +  da - initial distributed array
1141: -  lj - the local Jacobian

1143:    Level: intermediate

1145: .keywords:  distributed array, refine

1147: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DAGetLocalFunction(), DASetLocalFunction()
1148: @*/
1149: int DASetLocalJacobian(DA da,DALocalFunction1 lj)
1150: {
1153:   da->lj    = lj;
1154:   return(0);
1155: }

1157: /*@C
1158:        DAGetLocalFunction - Gets from a DA the local function set with DASetLocalFunction()

1160:    Collective on DA

1162:    Input Parameter:
1163: .  da - initial distributed array

1165:    Output Parameters:
1166: .  lf - the local function

1168:    Level: intermediate

1170: .keywords:  distributed array, refine

1172: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DASetLocalFunction()
1173: @*/
1174: int DAGetLocalFunction(DA da,DALocalFunction1 *lf)
1175: {
1178:   if (lf)       *lf = da->lf;
1179:   return(0);
1180: }

1182: /*@
1183:     DAFormFunction1 - Evaluates a user-provided function on each processor that 
1184:         shares a DA

1186:    Input Parameters:
1187: +    da - the DA that defines the grid
1188: .    vu - input vector
1189: .    vfu - output vector 
1190: -    w - any user data

1192:     Notes: Does NOT do ghost updates on vu upon entry

1194:     Level: advanced

1196: .seealso: DAComputeJacobian1WithAdic()

1198: @*/
1199: int DAFormFunction1(DA da,Vec vu,Vec vfu,void *w)
1200: {
1201:   int         ierr;
1202:   void        *u,*fu;
1203:   DALocalInfo info;
1204: 

1207:   DAGetLocalInfo(da,&info);
1208:   DAVecGetArray(da,vu,(void**)&u);
1209:   DAVecGetArray(da,vfu,(void**)&fu);

1211:   (*da->lf)(&info,u,fu,w);

1213:   DAVecRestoreArray(da,vu,(void**)&u);
1214:   DAVecRestoreArray(da,vfu,(void**)&fu);
1215:   return(0);
1216: }
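/*
   Illustrative usage sketch (editorial addition, not part of the original source):
   because DAFormFunction1() does NOT update ghost values, a caller normally scatters
   the global state into a ghosted local vector first.  The variable names here are
   assumptions made for the example.

     Vec xglobal,xlocal,F;
     ...
     ierr = DAGetLocalVector(da,&xlocal);CHKERRQ(ierr);
     ierr = DAGlobalToLocalBegin(da,xglobal,INSERT_VALUES,xlocal);CHKERRQ(ierr);
     ierr = DAGlobalToLocalEnd(da,xglobal,INSERT_VALUES,xlocal);CHKERRQ(ierr);
     ierr = DAFormFunction1(da,xlocal,F,userctx);CHKERRQ(ierr);
     ierr = DARestoreLocalVector(da,&xlocal);CHKERRQ(ierr);
*/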

1218: int DAFormFunctioniTest1(DA da,void *w)
1219: {
1220:   Vec         vu,fu,fui;
1221:   int         ierr,i,n;
1222:   PetscScalar *ui,mone = -1.0;
1223:   PetscRandom rnd;
1224:   PetscReal   norm;

1227:   DAGetLocalVector(da,&vu);
1228:   PetscRandomCreate(PETSC_COMM_SELF,RANDOM_DEFAULT,&rnd);
1229:   VecSetRandom(rnd,vu);
1230:   PetscRandomDestroy(rnd);

1232:   DAGetGlobalVector(da,&fu);
1233:   DAGetGlobalVector(da,&fui);
1234: 
1235:   DAFormFunction1(da,vu,fu,w);

1237:   VecGetArray(fui,&ui);
1238:   VecGetLocalSize(fui,&n);
1239:   for (i=0; i<n; i++) {
1240:     DAFormFunctioni1(da,i,vu,ui+i,w);
1241:   }
1242:   VecRestoreArray(fui,&ui);

1244:   VecAXPY(&mone,fu,fui);
1245:   VecNorm(fui,NORM_2,&norm);
1246:   PetscPrintf(da->comm,"Norm of difference in vectors %g\n",norm);
1247:   VecView(fu,0);
1248:   VecView(fui,0);

1250:   DARestoreLocalVector(da,&vu);
1251:   DARestoreGlobalVector(da,&fu);
1252:   DARestoreGlobalVector(da,&fui);
1253:   return(0);
1254: }

1256: /*@
1257:     DAFormFunctioni1 - Evaluates a user provided function

1259:    Input Parameters:
1260: +    da - the DA that defines the grid
1261: .    i - the component of the function we wish to compute (must be local)
1262: .    vu - input vector
1263: .    vfu - output value
1264: -    w - any user data

1266:     Notes: Does NOT do ghost updates on vu upon entry

1268:     Level: advanced

1270: .seealso: DAComputeJacobian1WithAdic()

1272: @*/
1273: int DAFormFunctioni1(DA da,int i,Vec vu,PetscScalar *vfu,void *w)
1274: {
1275:   int         ierr;
1276:   void        *u;
1277:   DALocalInfo info;
1278:   MatStencil  stencil;
1279: 

1282:   DAGetLocalInfo(da,&info);
1283:   DAVecGetArray(da,vu,(void**)&u);

1285:   /* figure out stencil value from i */
1286:   stencil.c = i % info.dof;
1287:   stencil.i = (i % (info.xm*info.dof))/info.dof;
1288:   stencil.j = (i % (info.xm*info.ym*info.dof))/(info.xm*info.dof);
1289:   stencil.k = i/(info.xm*info.ym*info.dof);

1291:   (*da->lfi)(&info,&stencil,u,vfu,w);

1293:   DAVecRestoreArray(da,vu,(void**)&u);
1294:   return(0);
1295: }
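/*
   Worked example (editorial addition, not in the original source): the index
   decomposition above, for a local patch with info.dof = 2, info.xm = 4 and
   info.ym = 3, maps i = 13 to
       stencil.c = 13 % 2       = 1
       stencil.i = (13 % 8)/2   = 2
       stencil.j = (13 % 24)/8  = 1
       stencil.k = 13/24        = 0
   i.e. the second component at local column 2, row 1.
*/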

1297: #if defined(new)
1298: /*
1299:   DAGetDiagonal_MFFD - Gets the diagonal for a matrix-free matrix whose local
1300:     function lives on a DA

1302:         y ~= (F(u + ha) - F(u))/h, 
1303:   where F = nonlinear function, as set by SNESSetFunction()
1304:         u = current iterate
1305:         h = difference interval
1306: */
1307: int DAGetDiagonal_MFFD(DA da,Vec U,Vec a)
1308: {
1309:   PetscScalar  h,*aa,*ww,v;
1310:   PetscReal    epsilon = PETSC_SQRT_MACHINE_EPSILON,umin = 100.0*PETSC_SQRT_MACHINE_EPSILON;
1311:   int          ierr,gI,nI;
1312:   MatStencil   stencil;
1313:   DALocalInfo  info;
1314: 
1316:   (*ctx->func)(0,U,a,ctx->funcctx);
1317:   (*ctx->funcisetbase)(U,ctx->funcctx);

1319:   VecGetArray(U,&ww);
1320:   VecGetArray(a,&aa);
1321: 
1322:   nI = 0;
1323:     h  = ww[gI];
1324:     if (h == 0.0) h = 1.0;
1325: #if !defined(PETSC_USE_COMPLEX)
1326:     if (h < umin && h >= 0.0)      h = umin;
1327:     else if (h < 0.0 && h > -umin) h = -umin;
1328: #else
1329:     if (PetscAbsScalar(h) < umin && PetscRealPart(h) >= 0.0)     h = umin;
1330:     else if (PetscRealPart(h) < 0.0 && PetscAbsScalar(h) < umin) h = -umin;
1331: #endif
1332:     h     *= epsilon;
1333: 
1334:     ww[gI] += h;
1335:     ierr          = (*ctx->funci)(i,w,&v,ctx->funcctx);
1336:     aa[nI]  = (v - aa[nI])/h;
1337:     ww[gI] -= h;
1338:     nI++;
1339:   }
1340:   VecRestoreArray(U,&ww);
1341:   VecRestoreArray(a,&aa);
1342:   return(0);
1343: }
1344: #endif

1346: #if defined(PETSC_HAVE_ADIC) && !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_SINGLE)
1347: EXTERN_C_BEGIN
1348: #include "adic/ad_utils.h"
1349: EXTERN_C_END

1351: /*@
1352:     DAComputeJacobian1WithAdic - Evaluates an ADIC-provided Jacobian function on each processor that 
1353:         shares a DA

1355:    Input Parameters:
1356: +    da - the DA that defines the grid
1357: .    vu - input vector (ghosted)
1358: .    J - output matrix
1359: -    w - any user data

1361:    Level: advanced

1363:     Notes: Does NOT do ghost updates on vu upon entry

1365: .seealso: DAFormFunction1()

1367: @*/
1368: int DAComputeJacobian1WithAdic(DA da,Vec vu,Mat J,void *w)
1369: {
1370:   int         ierr,gtdof,tdof;
1371:   PetscScalar *ustart;
1372:   DALocalInfo info;
1373:   void        *ad_u,*ad_f,*ad_ustart,*ad_fstart;
1374:   ISColoring  iscoloring;

1377:   DAGetLocalInfo(da,&info);

1379:   /* get space for derivative objects.  */
1380:   DAGetAdicArray(da,PETSC_TRUE,(void **)&ad_u,&ad_ustart,&gtdof);
1381:   DAGetAdicArray(da,PETSC_FALSE,(void **)&ad_f,&ad_fstart,&tdof);
1382:   VecGetArray(vu,&ustart);
1383:   PetscADSetValArray(((DERIV_TYPE*)ad_ustart),gtdof,ustart);
1384:   VecRestoreArray(vu,&ustart);

1386:   PetscADResetIndep();
1387:   DAGetColoring(da,IS_COLORING_GHOSTED,&iscoloring);
1388:   PetscADSetIndepArrayColored(ad_ustart,gtdof,iscoloring->colors);
1389:   PetscADIncrementTotalGradSize(iscoloring->n);
1390:   ISColoringDestroy(iscoloring);
1391:   PetscADSetIndepDone();

1393:   (*da->adic_lf)(&info,ad_u,ad_f,w);

1395:   /* stick the values into the matrix */
1396:   MatSetValuesAdic(J,(PetscScalar**)ad_fstart);

1398:   /* return space for derivative objects.  */
1399:   DARestoreAdicArray(da,PETSC_TRUE,(void **)&ad_u,&ad_ustart,&gtdof);
1400:   DARestoreAdicArray(da,PETSC_FALSE,(void **)&ad_f,&ad_fstart,&tdof);
1401:   return(0);
1402: }

1404: /*@C
1405:     DAMultiplyByJacobian1WithAdic - Applies an ADIC-provided Jacobian function to a vector on 
1406:     each processor that shares a DA.

1408:     Input Parameters:
1409: +   da - the DA that defines the grid
1410: .   vu - Jacobian is computed at this point (ghosted)
1411: .   v - product is done on this vector (ghosted)
1412: .   fu - output vector = J(vu)*v (not ghosted)
1413: -   w - any user data

1415:     Notes: 
1416:     This routine does NOT do ghost updates on vu upon entry.

1418:    Level: advanced

1420: .seealso: DAFormFunction1()

1422: @*/
1423: int DAMultiplyByJacobian1WithAdic(DA da,Vec vu,Vec v,Vec f,void *w)
1424: {
1425:   int         ierr,i,gtdof,tdof;
1426:   PetscScalar *avu,*av,*af,*ad_vustart,*ad_fstart;
1427:   DALocalInfo info;
1428:   void        *ad_vu,*ad_f;

1431:   DAGetLocalInfo(da,&info);

1433:   /* get space for derivative objects.  */
1434:   DAGetAdicMFArray(da,PETSC_TRUE,(void **)&ad_vu,(void**)&ad_vustart,&gtdof);
1435:   DAGetAdicMFArray(da,PETSC_FALSE,(void **)&ad_f,(void**)&ad_fstart,&tdof);

1437:   /* copy input vector into derivative object */
1438:   VecGetArray(vu,&avu);
1439:   VecGetArray(v,&av);
1440:   for (i=0; i<gtdof; i++) {
1441:     ad_vustart[2*i]   = avu[i];
1442:     ad_vustart[2*i+1] = av[i];
1443:   }
1444:   VecRestoreArray(vu,&avu);
1445:   VecRestoreArray(v,&av);

1447:   PetscADResetIndep();
1448:   PetscADIncrementTotalGradSize(1);
1449:   PetscADSetIndepDone();

1451:   (*da->adicmf_lf)(&info,ad_vu,ad_f,w);

1453:   /* stick the values into the vector */
1454:   VecGetArray(f,&af);
1455:   for (i=0; i<tdof; i++) {
1456:     af[i] = ad_fstart[2*i+1];
1457:   }
1458:   VecRestoreArray(f,&af);

1460:   /* return space for derivative objects.  */
1461:   DARestoreAdicMFArray(da,PETSC_TRUE,(void **)&ad_vu,(void**)&ad_vustart,&gtdof);
1462:   DARestoreAdicMFArray(da,PETSC_FALSE,(void **)&ad_f,(void**)&ad_fstart,&tdof);
1463:   return(0);
1464: }


1467: #else

1469: int DAComputeJacobian1WithAdic(DA da,Vec vu,Mat J,void *w)
1470: {
1472:   SETERRQ(1,"Must compile with bmake/PETSC_ARCH/packages flag PETSC_HAVE_ADIC for this routine");
1473: }

1475: int DAMultiplyByJacobian1WithAdic(DA da,Vec vu,Vec v,Vec f,void *w)
1476: {
1478:   SETERRQ(1,"Must compile with bmake/PETSC_ARCH/packages flag PETSC_HAVE_ADIC for this routine");
1479: }

1481: #endif

1483: /*@
1484:     DAComputeJacobian1 - Evaluates a local Jacobian function on each processor that 
1485:         shares a DA

1487:    Input Parameters:
1488: +    da - the DA that defines the grid
1489: .    vu - input vector (ghosted)
1490: .    J - output matrix
1491: -    w - any user data

1493:     Notes: Does NOT do ghost updates on vu upon entry

1495:     Level: advanced

1497: .seealso: DAFormFunction1()

1499: @*/
1500: int DAComputeJacobian1(DA da,Vec vu,Mat J,void *w)
1501: {
1502:   int         ierr;
1503:   void        *u;
1504:   DALocalInfo info;

1507:   DAGetLocalInfo(da,&info);
1508:   DAVecGetArray(da,vu,&u);
1509:   (*da->lj)(&info,u,J,w);
1510:   DAVecRestoreArray(da,vu,&u);
1511:   return(0);
1512: }


1515: /*
1516:     DAComputeJacobian1WithAdifor - Evaluates an ADIFOR-provided local Jacobian function on each processor that 
1517:         shares a DA

1519:    Input Parameters:
1520: +    da - the DA that defines the grid
1521: .    vu - input vector (ghosted)
1522: .    J - output matrix
1523: -    w - any user data

1525:     Notes: Does NOT do ghost updates on vu upon entry

1527: .seealso: DAFormFunction1()

1529: */
1530: int DAComputeJacobian1WithAdifor(DA da,Vec vu,Mat J,void *w)
1531: {
1532:   int         i,ierr,Nc,*color,N;
1533:   DALocalInfo info;
1534:   PetscScalar *u,*g_u,*g_f,*f,*p_u;
1535:   ISColoring  iscoloring;
1536:   void        (*lf)(int *,DALocalInfo*,PetscScalar*,PetscScalar*,int*,PetscScalar*,PetscScalar*,int*,void*,int*) =
1537:               (void (*)(int *,DALocalInfo*,PetscScalar*,PetscScalar*,int*,PetscScalar*,PetscScalar*,int*,void*,int*))*da->adifor_lf;

1540:   DAGetColoring(da,IS_COLORING_GHOSTED,&iscoloring);
1541:   Nc   = iscoloring->n;
1542:   DAGetLocalInfo(da,&info);
1543:   N    = info.gxm*info.gym*info.gzm*info.dof;

1545:   /* get space for derivative objects.  */
1546:   ierr  = PetscMalloc(Nc*info.gxm*info.gym*info.gzm*info.dof*sizeof(PetscScalar),&g_u);
1547:   ierr  = PetscMemzero(g_u,Nc*info.gxm*info.gym*info.gzm*info.dof*sizeof(PetscScalar));
1548:   p_u   = g_u;
1549:   color = iscoloring->colors;
1550:   for (i=0; i<N; i++) {
1551:     p_u[*color++] = 1.0;
1552:     p_u          += Nc;
1553:   }
1554:   ISColoringDestroy(iscoloring);
1555:   PetscMalloc(Nc*info.xm*info.ym*info.zm*info.dof*sizeof(PetscScalar),&g_f);
1556:   PetscMalloc(info.xm*info.ym*info.zm*info.dof*sizeof(PetscScalar),&f);

1558:   /* Seed the input array g_u with coloring information */
1559: 
1560:   VecGetArray(vu,&u);
1561:   (lf)(&Nc,&info,u,g_u,&Nc,f,g_f,&Nc,w,&ierr);
1562:   VecRestoreArray(vu,&u);

1564:   /* stick the values into the matrix */
1565:   /* PetscScalarView(Nc*info.xm*info.ym,g_f,0); */
1566:   MatSetValuesAdifor(J,Nc,g_f);

1568:   /* return space for derivative objects.  */
1569:   PetscFree(g_u);
1570:   PetscFree(g_f);
1571:   PetscFree(f);
1572:   return(0);
1573: }

1575: /*@C
1576:     DAMultiplyByJacobian1WithAD - Applies a Jacobian function supplied by ADIFOR or ADIC
1577:     to a vector on each processor that shares a DA.

1579:    Input Parameters:
1580: +    da - the DA that defines the grid
1581: .    vu - Jacobian is computed at this point (ghosted)
1582: .    v - product is done on this vector (ghosted)
1583: .    fu - output vector = J(vu)*v (not ghosted)
1584: -    w - any user data

1586:     Notes: 
1587:     This routine does NOT do ghost updates on vu and v upon entry.
1588:            
1589:     Automatically calls DAMultiplyByJacobian1WithAdifor() or DAMultiplyByJacobian1WithAdic()
1590:     depending on whether DASetLocalAdicMFFunction() or DASetLocalAdiforMFFunction() was called.

1592:    Level: advanced

1594: .seealso: DAFormFunction1(), DAMultiplyByJacobian1WithAdifor(), DAMultiplyByJacobian1WithAdic()

1596: @*/
1597: int DAMultiplyByJacobian1WithAD(DA da,Vec u,Vec v,Vec f,void *w)
1598: {
1599:   int         ierr;

1602:   if (da->adicmf_lf) {
1603: #if defined(PETSC_HAVE_ADIC) && !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_SINGLE)
1604:     DAMultiplyByJacobian1WithAdic(da,u,v,f,w);
1605: #else
1606:     SETERRQ(1,"Requires ADIC to be installed and cannot use complex numbers");
1607: #endif
1608:   } else if (da->adiformf_lf) {
1609:     DAMultiplyByJacobian1WithAdifor(da,u,v,f,w);
1610:   } else {
1611:     SETERRQ(1,"Must call DASetLocalAdiforMFFunction() or DASetLocalAdicMFFunction() before using");
1612:   }
1613:   return(0);
1614: }


1617: /*@C
1618:     DAMultiplyByJacobian1WithAdifor - Applies an ADIFOR-provided Jacobian function to a vector on each processor that 
1619:         shares a DA

1621:    Input Parameters:
1622: +    da - the DA that defines the grid
1623: .    vu - Jacobian is computed at this point (ghosted)
1624: .    v - product is done on this vector (ghosted)
1625: .    fu - output vector = J(vu)*v (not ghosted)
1626: -    w - any user data

1628:     Notes: Does NOT do ghost updates on vu and v upon entry

1630:    Level: advanced

1632: .seealso: DAFormFunction1()

1634: @*/
1635: int DAMultiplyByJacobian1WithAdifor(DA da,Vec u,Vec v,Vec f,void *w)
1636: {
1637:   int         ierr;
1638:   PetscScalar *au,*av,*af,*awork;
1639:   Vec         work;
1640:   DALocalInfo info;
1641:   void        (*lf)(DALocalInfo*,PetscScalar*,PetscScalar*,PetscScalar*,PetscScalar*,void*,int*) =
1642:               (void (*)(DALocalInfo*,PetscScalar*,PetscScalar*,PetscScalar*,PetscScalar*,void*,int*))*da->adiformf_lf;

1645:   DAGetLocalInfo(da,&info);

1647:   DAGetGlobalVector(da,&work);
1648:   VecGetArray(u,&au);
1649:   VecGetArray(v,&av);
1650:   VecGetArray(f,&af);
1651:   VecGetArray(work,&awork);
1652:   (lf)(&info,au,av,awork,af,w,&ierr);
1653:   VecRestoreArray(u,&au);
1654:   VecRestoreArray(v,&av);
1655:   VecRestoreArray(f,&af);
1656:   VecRestoreArray(work,&awork);
1657:   DARestoreGlobalVector(da,&work);

1659:   return(0);
1660: }

1662: /*@C
1663:        DASetInterpolationType - Sets the type of interpolation that will be 
1664:           returned by DAGetInterpolation()

1666:    Collective on DA

1668:    Input Parameter:
1669: +  da - initial distributed array
1670: -  ctype - DA_Q1 and DA_Q0 are currently the only supported forms

1672:    Level: intermediate

1674: .keywords:  distributed array, interpolation

1676: .seealso: DACreate1d(), DACreate2d(), DACreate3d(), DADestroy(), DA, DAInterpolationType
1677: @*/
1678: int DASetInterpolationType(DA da,DAInterpolationType ctype)
1679: {
1682:   da->interptype = ctype;
1683:   return(0);
1684: }
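/*
   Illustrative usage sketch (editorial addition, not part of the original source):
   selecting piecewise-constant interpolation on a coarse DA before refining it.
   The variable names and the DAGetInterpolation() argument list are assumptions
   made for the example.

     DA  dac,daf;
     Mat A;
     ...
     ierr = DASetInterpolationType(dac,DA_Q0);CHKERRQ(ierr);
     ierr = DARefine(dac,PETSC_COMM_WORLD,&daf);CHKERRQ(ierr);
     ierr = DAGetInterpolation(dac,daf,&A,PETSC_NULL);CHKERRQ(ierr);
*/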