Actual source code: gvec.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: gvec.c,v 1.48 2000/07/16 23:20:04 knepley Exp $";
  3: #endif
  4: /*
  5:   This file provides routines for grid vectors (vectors that are associated with grids,
  6:   possibly with multiple degrees of freedom per node). 
  7: */

  9: #include "petscsles.h"            /* For ALE Mesh Support */
 10:  #include "src/gvec/gvecimpl.h"

 12: /* Logging support */
 13: int GVEC_EvaluateFunction, GVEC_EvaluateFunctionCollective, GVEC_EvaluateRhs, GVEC_EvaluateJacobian;
 14: int GVEC_SetBoundary, GVEC_InterpolateField, GVEC_InterpolateFieldBatch, GVEC_InterpolateFieldBatchParallel;
 15: int GVEC_InterpolateFieldBatchCalc;

 17: extern int VecCreate_MPI_Private(Vec, int, const PetscScalar [], PetscMap);

 19: /*--------------------------------------------- General Vector Functions ---------------------------------------------*/
 20: /*@C
 21:    GVecDestroy - Destroys a grid vector.

 23:    Input Parameters:
 24: .  v  - the vector

 26:   Level: beginner

 28: .keywords: grid vector, destroy
 29: .seealso: VecDestroy(), GVecDuplicate()
 30: @*/
 31: int GVecDestroy(GVec v)
 32: {
 33:   int  ierr;

 37:   if (--v->refct > 0) return(0);
 38:   VecDestroy(v);
 39:   return(0);
 40: }

 42: /*@
 43:   GVecViewFromOptions - This function visualizes the vector based upon user options.

 45:   Collective on GVec

  47:   Input Parameters:
  48: + gvec  - The vector
  49: - title - [Optional] The title to use with a draw viewer

 50:   Level: intermediate

 52: .keywords: GVec, view, options, database
 53: .seealso: GVecSetFromOptions(), GVecView()
 54: @*/
 55: int GVecViewFromOptions(GVec gvec, char *title)
 56: {
 57:   PetscViewer viewer;
 58:   PetscDraw   draw;
 59:   PetscTruth  opt;
 60:   char       *titleStr;
 61:   char        typeName[1024];
 62:   char        fileName[1024];
 63:   int         len;
 64:   int         ierr;

 67:   PetscOptionsHasName(gvec->prefix, "-gvec_view", &opt);
 68:   if (opt == PETSC_TRUE) {
 69:     PetscOptionsGetString(gvec->prefix, "-gvec_view", typeName, 1024, &opt);
 70:     PetscStrlen(typeName, &len);
 71:     if (len > 0) {
 72:       PetscViewerCreate(gvec->comm, &viewer);
 73:       PetscViewerSetType(viewer, typeName);
 74:       PetscOptionsGetString(gvec->prefix, "-gvec_view_file", fileName, 1024, &opt);
 75:       if (opt == PETSC_TRUE) {
 76:         PetscViewerSetFilename(viewer, fileName);
 77:       } else {
 78:         PetscViewerSetFilename(viewer, gvec->name);
 79:       }
 80:       GVecView(gvec, viewer);
 81:       PetscViewerFlush(viewer);
 82:       PetscViewerDestroy(viewer);
 83:     } else {
 84:       GVecView(gvec, PETSC_NULL);
 85:     }
 86:   }
 87:   PetscOptionsHasName(gvec->prefix, "-gvec_view_draw", &opt);
 88:   if (opt == PETSC_TRUE) {
 89:     PetscViewerDrawOpen(gvec->comm, 0, 0, 0, 0, 300, 300, &viewer);
 90:     PetscViewerDrawGetDraw(viewer, 0, &draw);
 91:     if (title != PETSC_NULL) {
 92:       titleStr = title;
 93:     } else {
 94:       ierr = PetscObjectName((PetscObject) gvec);CHKERRQ(ierr);
 95:       titleStr = gvec->name;
 96:     }
 97:     PetscDrawSetTitle(draw, titleStr);
 98:     GVecView(gvec, viewer);
 99:     PetscViewerFlush(viewer);
100:     PetscDrawPause(draw);
101:     PetscViewerDestroy(viewer);
102:   }
103:   return(0);
104: }
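/*
   A minimal usage sketch for GVecViewFromOptions() (assumes a Grid 'grid' that has already been
   created and set up; the vector is only displayed if the user gave -gvec_view or -gvec_view_draw
   on the command line):

     GVec gvec;
     int  ierr;

     ierr = GVecCreate(grid, &gvec);CHKERRQ(ierr);
     ierr = GVecViewFromOptions(gvec, "Solution");CHKERRQ(ierr);
     ierr = GVecDestroy(gvec);CHKERRQ(ierr);
*/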

106: /*@ 
107:    GVecView - Views a grid vector.

109:    Input Parameters:
110: .  v      - The grid vector
111: .  viewer - [Optional] A visualization context

113:    Notes:
114:    GVecView() supports the same viewers as VecView().  The only difference
115:    is that for all multiprocessor cases, the output vector employs the natural
 116:    ordering of the grid, so in many cases this corresponds to the ordering 
117:    that would have been used for the uniprocessor case.

119:    The available visualization contexts include
120: $     PETSC_VIEWER_STDOUT_SELF - standard output (default)
121: $     PETSC_VIEWER_STDOUT_WORLD - synchronized standard
122: $       output where only the first processor opens
123: $       the file.  All other processors send their 
124: $       data to the first processor to print. 

 126:    The user can open alternative visualization contexts with
127: $    PetscViewerFileOpenASCII() - output vector to a specified file
128: $    PetscViewerFileOpenBinary() - output in binary to a
129: $         specified file; corresponding input uses VecLoad()
130: $    PetscViewerDrawOpenX() - output vector to an X window display
131: $    DrawLGCreate() - output vector as a line graph to an X window display
132: $    PetscViewerMatlabOpen() - output vector to Matlab viewer
133: $    PetscViewerMathematicaOpen() - output vector to Mathematica viewer

135:   Level: beginner

137: .keywords: view, visualize, output, print, write, draw
138: .seealso: VecView()
139: @*/
140: int GVecView(GVec v, PetscViewer viewer)
141: {
142:   Grid grid;
143:   int  ierr;

 147:   if (!viewer) {
 148:     viewer = PETSC_VIEWER_STDOUT_SELF;
 149:   }

153:   GVecGetGrid(v, &grid);
154:   (*grid->ops->gvecview)(v, viewer);
155:   return(0);
156: }
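/*
   A small sketch of GVecView() (assumes 'gvec' is an existing grid vector): print the vector in
   the grid's natural ordering, then draw it using the same PetscViewerDrawOpen() arguments used
   by GVecViewFromOptions() above.

     PetscViewer drawViewer;

     ierr = GVecView(gvec, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
     ierr = PetscViewerDrawOpen(gvec->comm, 0, 0, 0, 0, 300, 300, &drawViewer);CHKERRQ(ierr);
     ierr = GVecView(gvec, drawViewer);CHKERRQ(ierr);
     ierr = PetscViewerDestroy(drawViewer);CHKERRQ(ierr);
*/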

158: /*@ 
159:   GVecSerialize - This function stores or recreates a grid vector using a viewer for
160:   a binary file.

 162:   Input Parameters:
 163: + grid   - The grid with which the vector is associated
 164: . viewer - The viewer context
 165: - store  - This flag is PETSC_TRUE if data is being written; otherwise data is read into the vector

166:   Output Parameter:
167: . v      - The grid vector

169:   Level: beginner

171: .keywords: grid vector, serialize
172: .seealso: GridSerialize()
173: @*/
174: int GVecSerialize(Grid grid, GVec *v, PetscViewer viewer, PetscTruth store)
175: {


183:   VecSerialize(grid->comm, v, viewer, store);
184:   /* Newly created object should have grid as a parent */
185:   if (store == PETSC_FALSE) {
186:     PetscObjectCompose((PetscObject) *v, "Grid", (PetscObject) grid);
187:   }
188: 
189:   return(0);
190: }

192: /*@C
193:    GVecDuplicate - Duplicates a grid vector.

195:    Input Parameters:
196: .  v  - the vector

198:   Level: beginner

 200: .keywords: grid vector, duplicate
201: .seealso: VecDuplicate(), GVecDestroy()
202: @*/
203: int GVecDuplicate(GVec v, GVec *newvec)
204: {

209:   VecDuplicate(v, newvec);
210:   return(0);
211: }

213: /*@
214:   GVecGetGrid - This function returns the grid from a grid vector.

216:   Not collective

218:   Input Parameter:
219: . v    - The vector

221:   Output Parameter:
222: . grid - The grid

224:   Level: intermediate

226: .keywords: grid vector, get, grid
227: .seealso: GVecGetOrder(), GridGetMesh(), GMatGetGrid()
228: @*/
229: int GVecGetGrid(GVec v, Grid *grid)
230: {


237:   PetscObjectQuery((PetscObject) v, "Grid", (PetscObject *) grid);
239:   return(0);
240: }

242: /*@
243:   GVecGetOrder - This function returns the variable ordering from a grid vector.

245:   Not collective

247:   Input Parameter:
248: . v     - The vector

250:   Output Parameter:
251: . order - The variable ordering

253:   Level: intermediate

255: .keywords: grid vector, get, variable ordering
256: .seealso: GVecGetGrid(), GridGetMesh(), GMatGetGrid()
257: @*/
258: int GVecGetOrder(GVec v, VarOrdering *order)
259: {


266:   PetscObjectQuery((PetscObject) v, "Order", (PetscObject *) order);
268:   return(0);
269: }
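/*
   Sketch: recovering the Grid and VarOrdering attached to a grid vector. Both are stored as
   composed PetscObjects, so these queries return references rather than copies.

     Grid        grid;
     VarOrdering order;

     ierr = GVecGetGrid(gvec, &grid);CHKERRQ(ierr);
     ierr = GVecGetOrder(gvec, &order);CHKERRQ(ierr);
*/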

271: /*@C
272:   GVecScatterCreate - This function creates a scatter from a vector to an embedded vector.

274:   Collective on GVec

276:   Input Parameters:
277: + vec      - A vector
278: - embedVec - A vector constructed from an embedded ordering

280:   Output Parameter:
281: . scatter  - The scatter

283:   Level: beginner

 285: .keywords: variable ordering, vector scatter, embedding
 286: .seealso: VecScatterCreate()
287: @*/
288: int GVecScatterCreate(GVec vec, GVec embedVec, VecScatter *scatter)
289: {
290:   Grid          grid;
291:   VarOrdering   order, embedOrder;
292:   FieldClassMap map,   embedMap;
293:   int           f, field, nclass;
294:   int           ierr;

300:   GVecGetGrid(vec, &grid);
301:   GVecGetOrder(vec,      &order);
302:   GVecGetOrder(embedVec, &embedOrder);
303:   VarOrderingGetClassMap(order,      &map);
304:   VarOrderingGetClassMap(embedOrder, &embedMap);
305:   if (map->numFields < embedMap->numFields) {
306:     SETERRQ2(PETSC_ERR_ARG_INCOMP, "The embedded ordering cannot have fewer fields than the original (%d < %d)",
307:              map->numFields, embedMap->numFields);
308:   }
309:   if (map->numClasses != embedMap->numClasses) {
310:     SETERRQ2(PETSC_ERR_ARG_INCOMP, "The embedded ordering must have the same number of classes as the original (%d < %d)",
311:              map->numClasses, embedMap->numClasses);
312:   }
313:   for(nclass = 0; nclass < map->numClasses; nclass++) {
314:     if (embedMap->classSizes[nclass] > 0) {
315:       if (map->classSizes[nclass] <= 0) {
 316:         SETERRQ1(PETSC_ERR_ARG_INCOMP, "Mapping not an embedding: class %d is empty in the original", nclass);
317:       }
318:     }
319:     for(f = 0; f < embedMap->numFields; f++) {
320:       field = map->fields[f];
321:       if ((order->localStart[field][nclass] < 0) && (embedOrder->localStart[field][nclass] >= 0)) {
322:         SETERRQ2(PETSC_ERR_ARG_INCOMP, "Mapping not an embedding: field %d not present in class %d in the original",
323:                  field, nclass);
324:       }
325:     }
326:   }
327:   GridSetUp(grid);
328:   (*grid->ops->gridcreatevarscatter)(grid, order, vec, embedOrder, embedVec, scatter);
329:   return(0);
330: }
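/*
   Sketch of scattering into an embedded vector that holds a single field (assumes 'grid' and
   'vec' were created as in GVecCreate(), field 0 is used purely for illustration, and the
   PETSc 2.x VecScatterBegin()/VecScatterEnd() argument order applies):

     VarOrdering embedOrder;
     GVec        embedVec;
     VecScatter  scatter;
     int         field = 0;

     ierr = VarOrderingCreateSubset(grid->order, 1, &field, PETSC_FALSE, &embedOrder);CHKERRQ(ierr);
     ierr = GVecCreateRectangular(grid, embedOrder, &embedVec);CHKERRQ(ierr);
     ierr = GVecScatterCreate(vec, embedVec, &scatter);CHKERRQ(ierr);
     ierr = VecScatterBegin(vec, embedVec, INSERT_VALUES, SCATTER_FORWARD, scatter);CHKERRQ(ierr);
     ierr = VecScatterEnd(vec, embedVec, INSERT_VALUES, SCATTER_FORWARD, scatter);CHKERRQ(ierr);
*/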

332: /*@
333:    GVecGetLocalWorkGVec - Creates a new GVec to contain the local + ghost
334:        values portion of a GVec.

336:    Input Parameter:
337: .  v - the grid vector

339:    Output Parameters:
340: .  lv - the local vector

342:   Level: intermediate

344: .seealso: GVecLocalToGlobal(), GVecGlobalToLocal(), GVecRestoreLocalWorkGVec(),
345:           GVecGetWorkGVec(), GVecRestoreWorkGVec()
346: @*/
347: int GVecGetLocalWorkGVec(GVec v, GVec *lv)
348: {
349:   Grid grid;
350:   int  ierr;


356:   GVecGetGrid(v, &grid);
357:   (*grid->ops->gvecgetlocalworkgvec)(v, lv);
358:   return(0);
359: }

361: /*@
 362:    GVecRestoreLocalWorkGVec - Returns a local work vector obtained with GVecGetLocalWorkGVec().

364:    Input Parameter:
365: .  v - the grid vector

367:    Output Parameters:
368: .  lv - the local vector

370:   Level: intermediate

372: .seealso: GVecLocalToGlobal(), GVecGlobalToLocal(), GVecGetLocalWorkGVec(),
373:           GVecGetWorkGVec(), GVecRestoreWorkGVec()
374: @*/
375: int GVecRestoreLocalWorkGVec(GVec v, GVec *lv)
376: {
377:   Grid grid;
378:   int  ierr;


384:   GVecGetGrid(v, &grid);
385:   (*grid->ops->gvecrestorelocalworkgvec)(v, lv);
386:   return(0);
387: }

389: /*@
390:   GVecGetWorkGVec - Get an identical work grid vector.

392:   Input Parameter:
393: . v  - The grid vector

395:   Output Parameters:
396: . wv - The work vector

398:   Level: intermediate

400: .seealso: GVecLocalToGlobal(), GVecGlobalToLocal(), GVecRestoreWorkGVec(),
401:           GVecGetLocalWorkGVec(), GVecRestoreLocalWorkGVec()
402: @*/
403: int GVecGetWorkGVec(GVec v, GVec *wv)
404: {

410:   VecDuplicate(v, wv);
411:   return(0);
412: }

414: /*@
415:   GVecRestoreWorkGVec - Restores the work vector.

417:   Input Parameter:
418: . v  - The grid vector

420:   Output Parameters:
421: . wv - The work vector

423:   Level: intermediate

425: .seealso: GVecLocalToGlobal(), GVecGlobalToLocal(), GVecGetWorkGVec(),
426:           GVecGetLocalWorkGVec(), GVecRestoreLocalWorkGVec()
427: @*/
428: int GVecRestoreWorkGVec(GVec v, GVec *wv)
429: {

435:   VecDestroy(*wv);
436:   return(0);
437: }
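/*
   Sketch: borrowing a scratch vector with the same layout as v and giving it back when finished.

     GVec wv;

     ierr = GVecGetWorkGVec(v, &wv);CHKERRQ(ierr);
     ... use wv as temporary storage ...
     ierr = GVecRestoreWorkGVec(v, &wv);CHKERRQ(ierr);
*/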

439: /*@
440:    GVecGlobalToLocal - Copies a global vector including ghost points to 
441:        a local work vector.

 443:    Input Parameters:
 444: .  v - the grid vector
 445: .  mode - either INSERT_VALUES or ADD_VALUES

447:    Output Parameters:
448: .  lv - the local vector

450:   Level: intermediate

452: .seealso: GVecLocalToGlobal(), GVecCreateLocalGVec()
453: @*/
454: int GVecGlobalToLocal(GVec v, InsertMode mode, GVec lv)
455: {
456:   int  ierr;
457:   Grid grid;


463:   GVecGetGrid(v, &grid);
464:   (*grid->ops->gvecglobaltolocal)(v, mode, lv);
465:   return(0);
466: }

468: /*@
469:    GVecLocalToGlobal - Copies a local vector including ghost points to its
470:        global representation.

 472:    Input Parameters:
 473: .  v - the local grid vector
 474: .  mode - either INSERT_VALUES or ADD_VALUES

476:    Output Parameters:
477: .  lv - the global vector

479:   Level: intermediate

481: .seealso: GVecGlobalToLocal(), GVecCreateLocalGVec()
482: @*/
483: int GVecLocalToGlobal(GVec v,InsertMode mode,GVec lv)
484: {
485:   int  ierr;
486:   Grid grid;


492:   GVecGetGrid(v, &grid);
493:   (*grid->ops->gveclocaltoglobal)(v, mode, lv);
494:   return(0);
495: }
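/*
   Sketch of the usual ghost-update cycle (assumes 'v' is a global grid vector): obtain a local
   work vector, pull down the owned values plus ghost points, compute on the local form, and add
   any accumulated contributions back into the global vector.

     GVec lv;

     ierr = GVecGetLocalWorkGVec(v, &lv);CHKERRQ(ierr);
     ierr = GVecGlobalToLocal(v, INSERT_VALUES, lv);CHKERRQ(ierr);
     ... local computation on lv ...
     ierr = GVecLocalToGlobal(lv, ADD_VALUES, v);CHKERRQ(ierr);
     ierr = GVecRestoreLocalWorkGVec(v, &lv);CHKERRQ(ierr);
*/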

497: /*@
498:   GVecEvaluateFunction - Evaluates a function over the specified fields
499:   on the locations defined by the underlying grid and its discretization.

501:   Input Parameters:
502: + v         - The grid vector
503: . numFields - The number of fields to evaluate
504: . fields    - The fields
505: . f         - The user provided function
506: . alpha     - The scalar multiplier
507: - ctx       - [Optional] A user provided context for the function

 509:   The function f should return values ordered first by node and then by component.
 510:   For instance, if superscripts denote components and subscripts denote nodes:
511:     v^0_0 v^1_0 v^2_0 v^0_1 v^1_1 v^2_1 ... v^0_n v^1_n v^2_n
512:   This is the standard for PointFunctions.

514:   Level: intermediate

516: .seealso: GVecEvaluateFunctionGalerkin,GMatEvaluateOperatorGalerkin
517: @*/
518: int GVecEvaluateFunction(GVec v, int numFields, int *fields, PointFunction f, PetscScalar alpha, void *ctx)
519: {
520:   Grid        grid;
521:   VarOrdering order;
522:   int         size;
523:   int         fieldIdx;
524:   int         ierr;

528:   GVecGetGrid(v, &grid);
529:   PetscLogEventBegin(GVEC_EvaluateFunction, v, grid, 0, 0);

531:   VecGetSize(v, &size);
532:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++)
 533:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
534:       SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid field number %d", fields[fieldIdx]);
535:     }
536:   /* Should check that fields are in the local order */

538:   if ((grid->isConstrained) && (size == grid->constraintOrder->numVars)) {
539:     VarOrderingCreateSubset(grid->constraintOrder, numFields, fields, PETSC_FALSE, &order);
540:     /* This is a kludge to get around the size check since the new constrained vars are not formed */
541:     order->numLocVars -= order->numLocNewVars;
542:     (*grid->ops->gvecevaluatefunction)(grid, v, order, f, alpha, ctx);
543:     order->numLocVars += order->numLocNewVars;
544:   } else if (size == grid->order->numVars) {
545:     VarOrderingCreateSubset(grid->order, numFields, fields, PETSC_FALSE, &order);
546:     (*grid->ops->gvecevaluatefunction)(grid, v, order, f, alpha, ctx);
547:   } else {
548:     SETERRQ(PETSC_ERR_ARG_INCOMP, "Vector size does not conform to grid");
549:   }
550:   VarOrderingDestroy(order);
551:   PetscLogEventEnd(GVEC_EvaluateFunction, v, grid, 0, 0);
552:   return(0);
553: }
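/*
   Sketch: evaluating a function over a single field of a grid vector. PointFunctionZero (also
   used by GVecSetBoundaryZero() below) stands in for a user-supplied PointFunction; any function
   with the PointFunction prototype may be substituted.

     int field = 0;

     ierr = GVecEvaluateFunction(v, 1, &field, PointFunctionZero, 1.0, PETSC_NULL);CHKERRQ(ierr);
*/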

555: /*@
556:   GVecEvaluateFunctionCollective - Evaluates a function over the specified fields
557:   on the locations defined by the underlying grid and its discretization. The only
 558:   difference between it and GVecEvaluateFunction() is that each processor is
559:   guaranteed to call f at each iteration, possibly with null arguments.

561:   Input Parameters:
562: + v         - The grid vector
563: . numFields - The number of fields to evaluate
564: . fields    - The fields
565: . f         - The user provided function
566: . alpha     - The  scalar multiplier
567: - ctx       - [Optional] The user provided context for the function

 569:   The function f should return values ordered first by node and then by component.
 570:   For instance, if superscripts denote components and subscripts denote nodes:
571:     v^0_0 v^1_0 v^2_0 v^0_1 v^1_1 v^2_1 ... v^0_n v^1_n v^2_n
572:   This is the standard for PointFunctions. Also, this function is typically
573:   used with an f that has a barrier. In this way you can guarantee that any
574:   collective operation will complete inside f.

576:   Level: intermediate

578: .seealso: GVecEvaluateFunctionGalerkin,GMatEvaluateOperatorGalerkin
579: @*/
580: int GVecEvaluateFunctionCollective(GVec v, int numFields, int *fields, PointFunction f, PetscScalar alpha, void *ctx)
581: {
582:   Grid        grid;
583:   VarOrdering order;
584:   int         size;
585:   int         fieldIdx;
586:   int         ierr;

590:   GVecGetGrid(v, &grid);
591:   PetscLogEventBegin(GVEC_EvaluateFunctionCollective, v, grid, 0, 0);

593:   VecGetSize(v, &size);
594:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++)
 595:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
596:       SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid field number %d", fields[fieldIdx]);
597:     }
598:   /* Should check that fields are in the local order */

600:   if ((grid->isConstrained) && (size == grid->constraintOrder->numVars)) {
601:     VarOrderingCreateSubset(grid->constraintOrder, numFields, fields, PETSC_FALSE, &order);
602:     /* This is a kludge to get around the size check since the new constrained vars are not formed */
603:     order->numLocVars -= order->numLocNewVars;
604:     (*grid->ops->gvecevaluatefunctioncollective)(grid, v, order, f, alpha, ctx);
605:     order->numLocVars += order->numLocNewVars;
606:   } else if (size == grid->order->numVars) {
607:     VarOrderingCreateSubset(grid->order, numFields, fields, PETSC_FALSE, &order);
608:     (*grid->ops->gvecevaluatefunctioncollective)(grid, v, order, f, alpha, ctx);
609:   } else {
610:     SETERRQ(PETSC_ERR_ARG_INCOMP, "Vector size does not conform to grid");
611:   }
612:   VarOrderingDestroy(order);
613:   PetscLogEventEnd(GVEC_EvaluateFunctionCollective, v, grid, 0, 0);
614:   return(0);
615: }

617: /*@
 618:   GVecEvaluateFunctionRectangular - Evaluates a function over the specified fields
 619:   on the locations defined by the underlying grid and its discretization, using a
 620:   user-defined variable ordering.

622:   Input Parameters:
623: + v     - The grid vector
624: . order - The variable ordering
625: . f     - The user provided function
626: . alpha - A scalar multiplier
627: - ctx   - An optional user provided context for the function

629:   Level: intermediate

631: .seealso: GVecEvaluateFunctionGalerkin,GMatEvaluateOperatorGalerkin
632: @*/
633: int GVecEvaluateFunctionRectangular(GVec v, VarOrdering order, PointFunction f, PetscScalar alpha, void *ctx)
634: {
635:   Grid grid;
636:   int  size;
637:   int  ierr;

641:   GVecGetGrid(v, &grid);
642:   VecGetSize(v, &size);
643:   if (size != order->numVars) SETERRQ(PETSC_ERR_ARG_INCOMP, "Vector size does not conform to ordering");
644:   /* Should check that fields are in the local order */

646:   (*grid->ops->gvecevaluatefunction)(grid, v, order, f, alpha, ctx);
647:   return(0);
648: }

650: /*@
651:   GVecEvaluateFunctionGalerkin - Evaluates the weak form of a function over
652:   a field on the locations defined by the underlying grid and its discretization.

 654:   Input Parameters:
655: . v         - The grid vector
656: . numFields - The number of fields to evaluate
657: . fields    - The fields
658: . f         - The user provided function
659: . alpha     - A scalar multiplier
660: . ctx       - An optional user provided context for the function

662:   Level: intermediate

664: .seealso: GVecEvaluateFunction,GMatEvaluateFunctionGalerkin
665: @*/
666: int GVecEvaluateFunctionGalerkin(GVec v, int numFields, int *fields, PointFunction f, PetscScalar alpha, void *ctx)
667: {
668:   Grid grid;
669:   int  fieldIdx;
670:   int  ierr;


676:   GVecGetGrid(v, &grid);
677:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
 678:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
679:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
680:     }
681:   }
682:   /* Should check that fields are in the local order */
683:   (*grid->ops->gvecevaluatefunctiongalerkin)(grid, v, numFields, fields, grid->locOrder, f, alpha, ctx);
684: 
685:   return(0);
686: }

688: /*@
689:   GVecEvaluateFunctionGalerkinCollective - Evaluates the weak form of a function over
690:   a field on the locations defined by the underlying grid and its discretization. The
 691:   only difference between it and GVecEvaluateFunctionGalerkin() is that each processor
692:   is guaranteed to call f at each iteration, possibly with null arguments.

 694:   Input Parameters:
695: . v         - The grid vector
696: . numFields - The number of fields to evaluate
697: . fields    - The fields
698: . f         - The user provided function
699: . alpha     - A scalar multiplier
700: . ctx       - An optional user provided context for the function

702:   Level: intermediate

704: .seealso: GVecEvaluateFunction,GMatEvaluateFunctionGalerkin
705: @*/
706: int GVecEvaluateFunctionGalerkinCollective(GVec v, int numFields, int *fields, PointFunction f, PetscScalar alpha, void *ctx)
707: {
708:   Grid grid;
709:   int  fieldIdx;
710:   int  ierr;


716:   GVecGetGrid(v, &grid);
717:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
 718:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
719:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
720:     }
721:   }
722:   /* Should check that fields are in the local order */
723:   (*grid->ops->gvecevaluatefunctiongalerkincollective)(grid, v, numFields, fields, grid->locOrder, f, alpha, ctx);
724: 
725:   return(0);
726: }

728: /*@
729:   GVecEvaluateBoundaryFunctionGalerkin - Evaluates the weak form of a function over
730:   a field on the locations defined by the boundary of the underlying mesh and its
731:   discretization.

 733:   Input Parameters:
734: . v         - The grid vector
735: . numFields - The number of fields to evaluate
736: . fields    - The fields
737: . f         - The user provided function
738: . alpha     - A scalar multiplier
739: . ctx       - An optional user provided context for the function

741:   Level: intermediate

743: .seealso: GVecEvaluateFunction,GMatEvaluateFunctionGalerkin
744: @*/
745: int GVecEvaluateBoundaryFunctionGalerkin(GVec v, int numFields, int *fields, PointFunction f, PetscScalar alpha, void *ctx)
746: {
747:   Grid grid;
748:   int  fieldIdx;
749:   int  ierr;


755:   GVecGetGrid(v, &grid);
756:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
 757:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
758:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
759:     }
760:   }
761:   /* Should check that fields are in the local order */
762:   (*grid->ops->gvecevaluateboundaryfunctiongalerkin)(grid, v, numFields, fields, grid->bdLocOrder, f, alpha, ctx);
763: 
764:   return(0);
765: }

767: /*@
768:   GVecEvaluateBoundaryFunctionGalerkinCollective - Evaluates the weak form of a
769:   function over a field on the locations defined by the boundary of the underlying
770:   mesh and its discretization. The only difference between it and
 771:   GVecEvaluateBoundaryFunctionGalerkin() is that each processor is guaranteed to
772:   call f at each iteration, possibly with null arguments.

 774:   Input Parameters:
775: . v         - The grid vector
776: . numFields - The number of fields to evaluate
777: . fields    - The fields
778: . f         - The user provided function
779: . alpha     - A scalar multiplier
780: . ctx       - An optional user provided context for the function

782:   Level: intermediate

784: .seealso: GVecEvaluateFunction,GMatEvaluateFunctionGalerkin
785: @*/
786: int GVecEvaluateBoundaryFunctionGalerkinCollective(GVec v, int numFields, int *fields, PointFunction f, PetscScalar alpha, void *ctx)
787: {
788:   Grid grid;
789:   int  fieldIdx;
790:   int  ierr;


796:   GVecGetGrid(v, &grid);
797:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
 798:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
799:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
800:     }
801:   }
802:   /* Should check that fields are in the local order */
803:   (*grid->ops->gvecevaluateboundaryfunctiongalerkincollective)(grid, v, numFields, fields, grid->bdLocOrder, f, alpha, ctx);
804: 
805:   return(0);
806: }

808: /*@
809:   GVecEvaluateNonlinearOperatorGalerkin - Evaluates the weak form of a nonlinear operator
810:   over a field on the locations defined by the underlying grid and its discretization.

812:   Collective on GVec

 814:   Input Parameters:
815: . v         - The grid vector
816: . n         - The number of input vectors
817: . vecs      - The input vectors
818: . numFields - The number of fields to evaluate
819: . fields    - The fields
820: . op        - The nonlinear operator
821: . alpha     - The scalar multiplier
822: . isALE     - The flag for ALE operators
823: . ctx       - [Optional] A user provided context for the function

825:   Level: intermediate

827: .keywords: grid vector, nonlinear, operator, galerkin
828: .seealso: GVecEvaluateFunctionGalerkin()
829: @*/
830: int GVecEvaluateNonlinearOperatorGalerkin(GVec v, int n, GVec vecs[], int numFields, int *fields, NonlinearOperator op,
831:                                           PetscScalar alpha, PetscTruth isALE, void *ctx)
832: {
833:   Grid grid;
834:   GVec x, y;
835:   int  fieldIdx;
836:   int  ierr;


842:   GVecGetGrid(v, &grid);
843:   for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
 844:     if ((fields[fieldIdx] < 0) || (fields[fieldIdx] >= grid->numFields)) {
845:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
846:     }
847:   }
848:   switch(n) {
849:   case 1:
850:     x = y = vecs[0];
851:     break;
852:   case 2:
853:     x = vecs[0];
854:     y = vecs[1];
855:     break;
856:   default:
857:       SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Only quadratic operators supported");
858:   }
859:   /* Should check that fields are in the local order */
860:   (*grid->ops->gvecevaluatenonlinearoperatorgalerkin)(grid, v, x, y, numFields, fields, grid->locOrder, op, alpha,
861:                                                              isALE, ctx);
862: 
863:   return(0);
864: }

866: /*@
867:    GVecEvaluateOperatorGalerkin - Evaluates the weak form of an operator over
868:    a field on the locations defined by the underlying grid and its discretization.

 870:    Input Parameters:
871: .  v      - The grid vector
872: .  x,y    - The input grid vectors, usually the previous solution
873: .  sField - The shape function field
874: .  tField - The test function field
875: .  op     - The operator
876: .  alpha  - A scalar multiplier
877: .  ctx    - An optional user provided context for the function

879:   Level: intermediate

881: .seealso: GVecEvaluateFunction,GMatEvaluateFunctionGalerkin
882: @*/
883: int GVecEvaluateOperatorGalerkin(GVec v, GVec x, GVec y, int sField, int tField, int op, PetscScalar alpha, void *ctx)
884: {
885:   Grid             grid;
886:   VarOrdering      sOldOrder, tOldOrder;
887:   VarOrdering      sOrder,    tOrder;
888:   LocalVarOrdering sLocOrder, tLocOrder;
889:   int              numFields;
890:   int              ierr;


897:   GVecGetGrid(v, &grid);
898:   /* Check for valid fields */
899:   GridGetNumFields(grid, &numFields);
900:   if ((sField < 0) || (sField >= numFields) || (tField < 0) || (tField >= numFields)) {
901:     SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
902:   }
903:   /* Check for valid operator */
904:   if ((op < 0) || (op >= grid->fields[sField].disc->numOps) || (!grid->fields[sField].disc->operators[op])) {
905:     SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
906:   }
907:   if ((grid->fields[sField].disc->operators[op]->precompInt == PETSC_NULL) &&
908:       (grid->fields[sField].disc->operators[op]->opFunc     == PETSC_NULL) &&
909:       (grid->fields[sField].disc->operators[op]->ALEOpFunc  == PETSC_NULL)) {
910:     SETERRQ(PETSC_ERR_ARG_WRONG, "Operator cannot be calculated");
911:   }
912:   if (grid->fields[sField].disc->numQuadPoints != grid->fields[tField].disc->numQuadPoints) {
913:     SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible quadrature sets");
914:   }
915:   /* Create orderings */
916:   GVecGetOrder(x, &sOldOrder);
917:   VarOrderingCreateSubset(sOldOrder, 1, &sField, PETSC_FALSE, &sOrder);
918:   LocalVarOrderingCreate(grid, 1, &sField, &sLocOrder);
919:   GVecGetOrder(v, &tOldOrder);
920:   VarOrderingCreateSubset(tOldOrder, 1, &tField, PETSC_FALSE, &tOrder);
921:   LocalVarOrderingCreate(grid, 1, &tField, &tLocOrder);
922:   /* Calculate operator */
923:   (*grid->ops->gvecevaluateoperatorgalerkin)(grid, v, x, y, sOrder, sLocOrder, tOrder, tLocOrder, op, alpha, ctx);
924: 
925:   /* Destroy orderings */
926:   VarOrderingDestroy(sOrder);
927:   LocalVarOrderingDestroy(sLocOrder);
928:   VarOrderingDestroy(tOrder);
929:   LocalVarOrderingDestroy(tLocOrder);
930:   return(0);
931: }

933: /*@
934:   GVecEvaluateOperatorGalerkinRectangular - Evaluates the weak form of an operator over
935:   a field on the locations defined by the underlying grid and its discretization.

937:   Collective on GVec

939:   Input Parameters:
940: + v         - The grid vector
941: . x         - A grid vector, usually the previous solution
942: . sOrder    - The global variable ordering for the shape functions 
943: . sLocOrder - The local variable ordering for the shape functions 
 944: . tOrder    - The global variable ordering for the test functions 
 945: . tLocOrder - The local variable ordering for the test functions 
946: . op        - The operator
947: . alpha     - The scalar multiplier for the operator
948: - ctx       - [Optional] The user provided context for the function

950:   Level: intermediate

952: .keywords: grid vector, evaluate, operator, galerkin
953: .seealso: GVecEvaluateFunction(), GMatEvaluateFunctionGalerkin()
954: @*/
955: int GVecEvaluateOperatorGalerkinRectangular(GVec v, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
956:                                             VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
957:                                             void *ctx)
958: {
959:   Grid          grid;
960:   FieldClassMap sMap, tMap;
961:   int           numTotalFields;
962:   int          *sFields, *tFields;
963:   int           f;
964:   int           ierr;


974:   GVecGetGrid(v, &grid);
975:   GridGetNumFields(grid, &numTotalFields);
976:   VarOrderingGetClassMap(sOrder, &sMap);
977:   VarOrderingGetClassMap(tOrder, &tMap);
978:   sFields = sMap->fields;
979:   tFields = tMap->fields;
980:   /* Check for compatible orderings */
981:   if ((tOrder->numVars != v->N) || (tOrder->numLocVars != v->n)) {
982:     SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible test function ordering");
983:   }
984:   if ((sOrder->numVars != x->N) || (sOrder->numLocVars != x->n)) {
985:     SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible shape function ordering");
986:   }
987:   if (sMap->numFields != tMap->numFields) {
988:     SETERRQ(PETSC_ERR_ARG_INCOMP, "Shape and test function orderings must have the same number of fields");
989:   }
990:   for(f = 0; f < sMap->numFields; f++) {
991:     if ((sFields[f] < 0) || (sFields[f] >= numTotalFields) || (tFields[f] < 0) || (tFields[f] >= numTotalFields)) {
992:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
993:     }
994:     /* Check for valid operator */
995:     if ((op < 0) || (op >= grid->fields[sFields[f]].disc->numOps) || (!grid->fields[sFields[f]].disc->operators[op])) {
996:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
997:     }
998:     if ((grid->fields[sFields[f]].disc->operators[op]->precompInt == PETSC_NULL) &&
999:         (grid->fields[sFields[f]].disc->operators[op]->opFunc     == PETSC_NULL) &&
1000:         (grid->fields[sFields[f]].disc->operators[op]->ALEOpFunc  == PETSC_NULL)) {
1001:       SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
1002:     }
1003:     if (grid->fields[sFields[f]].disc->numQuadPoints != grid->fields[tFields[f]].disc->numQuadPoints) {
1004:       SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible quadrature sets");
1005:     }
1006:   }
1007:   /* Calculate operator */
1008:   (*grid->ops->gvecevaluateoperatorgalerkin)(grid, v, x, x, sOrder, sLocOrder, tOrder, tLocOrder, op, alpha, ctx);
1009: 
1010:   return(0);
1011: }

1013: /*@
1014:    GVecEvaluateJacobian - Evaluates the action of the system matrix
1015:          over the active fields on the locations defined by the underlying grid and
1016:          its discretization.

1018:    Input Parameters:
1019: +  x   - The argument vector to the Jacobian
1020: .  y   - The vector to which the Jacobian is applied
1021: -  ctx - An optional user provided context for the function

1023:    Output Parameter:
1024: .  v   - The result vector J(x) y

1026:   Level: intermediate

1028: .seealso: GVecEvaluateFunction, GMatEvaluateFunctionGalerkin
1029: @*/
1030: int GVecEvaluateJacobian(GVec v, GVec x, GVec y, void *ctx)
1031: {
1032:   Grid      grid;
1033:   Mesh      mesh;
1034:   MeshMover mover;
1035:   Grid      meshVelGrid;
1036:   int       ierr;


1041:   GVecGetGrid(v, &grid);
1042:   if (grid->ALEActive == PETSC_TRUE) {
1043:     GridGetMesh(grid, &mesh);
1044:     MeshGetMover(mesh, &mover);
1045:     MeshMoverGetVelocityGrid(mover, &meshVelGrid);
1046:     GridSetUp(meshVelGrid);
1047:   }
1048:   PetscLogEventBegin(GVEC_EvaluateJacobian, v, grid, x, y);
1049:   (*grid->ops->gvecevaluatesystemmatrix)(grid, x, y, v, ctx);
1050:   PetscLogEventEnd(GVEC_EvaluateJacobian, v, grid, x, y);
1051:   return(0);
1052: }
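/*
   Sketch: applying the Jacobian linearized about the state x to a direction y, leaving the
   result J(x) y in v (all three vectors built on the same grid, e.g. with GVecCreate()):

     ierr = GVecEvaluateJacobian(v, x, y, PETSC_NULL);CHKERRQ(ierr);
*/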

1054: /*@
1055:    GVecEvaluateJacobianDiagonal - Evaluates the action of the diagonal of the
1056:    system matrix over the active fields on the locations defined by the underlying
1057:    grid and its discretization.

1059:    Input Parameters:
1060: +  x   - The argument vector to the Jacobian
1061: -  ctx - An optional user provided context for the function

1063:    Output Parameter:
1064: .  d   - The diagonal of J(x)

1066:   Level: intermediate

1068: .seealso: GVecEvaluateJacobian, GVecEvaluateFunction
1069: @*/
1070: int GVecEvaluateJacobianDiagonal(GVec d, GVec x, void *ctx)
1071: {
1072:   Grid      grid;
1073:   Mesh      mesh;
1074:   MeshMover mover;
1075:   Grid      meshVelGrid;
1076:   int       ierr;


1081:   GVecGetGrid(d, &grid);
1082:   if (grid->ALEActive == PETSC_TRUE) {
1083:     GridGetMesh(grid, &mesh);
1084:     MeshGetMover(mesh, &mover);
1085:     MeshMoverGetVelocityGrid(mover, &meshVelGrid);
1086:     GridSetUp(meshVelGrid);
1087:   }
1088:   PetscLogEventBegin(GVEC_EvaluateJacobian, d, grid, x, 0);
1089:   (*grid->ops->gvecevaluatesystemmatrixdiagonal)(grid, x, d, ctx);
1090:   PetscLogEventEnd(GVEC_EvaluateJacobian, d, grid, x, 0);
1091:   return(0);
1092: }

1094: /*@
1095:   GVecEvaluateJacobianConstrained - Evaluates the action of the Jacobian
1096:   of the extra fields introduced by the constraints.

1098:   Collective on GVec

1100:   Input Parameter:
1101: . v - The grid vector

1103:   Output Parameter:
1104: . x - The action of the constraint Jacobian 

1106:   Level: intermediate

1108: .keywords: grid vector, jacobian, evaluate, constraint
1109: .seealso: GVecEvaluateFunction(), GVecEvaluateRhs()
1110: @*/
1111: int GVecEvaluateJacobianConstrained(GVec v, GVec x)
1112: {
1113:   Grid       grid;
1114:   PetscTruth isConstrained;
1115:   int        ierr;


1121:   GVecGetGrid(v, &grid);
1122:   GridIsConstrained(grid, &isConstrained);
1123:   if ((isConstrained == PETSC_TRUE) && (grid->constraintCtx->ops->applyjac)) {
1124:     (*grid->constraintCtx->ops->applyjac)(grid, x, v);
1125:   }
1126:   return(0);
1127: }

1129: /*@
1130:   GVecSolveJacobianConstrained - Evaluates the action of the inverse of
1131:   the Jacobian of the extra fields introduced by the constraints.

1133:   Collective on GVec

1135:   Input Parameter:
1136: . v - The grid vector

1138:   Output Parameter:
1139: . x - The action of the inverse of the constraint Jacobian

1141:   Level: intermediate

1143: .keywords: grid vector, jacobian, solve, constraint
1144: .seealso: GVecEvaluateFunction(), GVecEvaluateRhs()
1145: @*/
1146: int GVecSolveJacobianConstrained(GVec v, GVec x)
1147: {
1148:   Grid       grid;
1149:   PetscTruth isConstrained;
1150:   int        ierr;


1156:   GVecGetGrid(v, &grid);
1157:   GridIsConstrained(grid, &isConstrained);
1158:   if ((isConstrained == PETSC_TRUE) && (grid->constraintCtx->ops->solvejac)) {
1159:     (*grid->constraintCtx->ops->solvejac)(grid, x, v);
1160:   }
1161:   return(0);
1162: }

1164: /*@
1165:    GVecSetBoundary - Applies the specified Dirichlet boundary conditions to this vector.

1167:    Input Parameters:
1168: .  v   - The grid vector
1169: .  ctx - An optional user provided context for the function

1171:   Level: intermediate

1173: .seealso: GridSetBC(), GridAddBC()
1174: @*/
1175: int GVecSetBoundary(GVec v, void *ctx)
1176: {
1177:   Grid           grid;
1178:   int            bc;
1179:   int            num;
1180:   int           *bd;
1181:   int           *field;
1182:   PointFunction *func;
1183:   int            ierr;

1187:   GVecGetGrid(v, &grid);
1188:   PetscLogEventBegin(GVEC_SetBoundary, v, grid, 0, 0);
1189:   for(bc = 0, num = 0; bc < grid->numBC; bc++) {
1190:     if (grid->bc[bc].reduce == PETSC_FALSE) num++;
1191:   }
1192:   if (num > 0) {
1193:     PetscMalloc(num * sizeof(int),             &bd);
1194:     PetscMalloc(num * sizeof(int),             &field);
1195:     PetscMalloc(num * sizeof(PointFunction *), &func);
1196:     for(bc = 0, num = 0; bc < grid->numBC; bc++) {
1197:       if (grid->bc[bc].reduce == PETSC_FALSE) {
1198:         bd[num]     = grid->bc[bc].boundary;
1199:         field[num]  = grid->bc[bc].field;
1200:         func[num++] = grid->bc[bc].func;
1201:       }
1202:     }
1203:     GridSetVecBoundaryRectangular(num, bd, field, func, grid->order, v, ctx);
1204:     PetscFree(bd);
1205:     PetscFree(field);
1206:     PetscFree(func);
1207:   }
1208:   for(bc = 0; bc < grid->numPointBC; bc++) {
1209:     if (grid->pointBC[bc].reduce == PETSC_FALSE) {
1210:       GridSetVecPointBoundary(grid->pointBC[bc].node, grid->pointBC[bc].field, grid->pointBC[bc].func, v, ctx);
1211:     }
1212:   }
1213:   PetscLogEventEnd(GVEC_SetBoundary, v, grid, 0, 0);
1214:   return(0);
1215: }
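/*
   Sketch: imposing the grid's Dirichlet conditions on a vector, for instance a right hand side
   'b' assembled with GVecEvaluateFunctionGalerkin(); within a Newton iteration the companion
   GVecSetBoundaryZero() below is applied to the residual instead.

     ierr = GVecSetBoundary(b, PETSC_NULL);CHKERRQ(ierr);
*/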

1217: /*@
1218:    GVecSetBoundaryZero - Applies a Dirichlet boundary condition of zero to the specified
1219:    fields in this vector. This is mainly used to double up the boundary conditions in a
1220:    time dependent nonlinear problem. The original boundary condition is used to initialize
1221:    the solution at the start of the Newton iteration, and this is used to set the corresponding
1222:    components of the residual to zero.

1224:    Input Parameters:
1225: .  v   - The grid vector
1226: .  ctx - An optional user provided context for the function

1228:   Level: intermediate

1230: .seealso: GridSetBC(), GridAddBC()
1231: @*/
1232: int GVecSetBoundaryZero(GVec v, void *ctx)
1233: {
1234:   Grid grid;
1235:   int  bc;
1236:   int  ierr;

1240:   GVecGetGrid(v, &grid);
1241:   for(bc = 0; bc < grid->numBC; bc++) {
1242:     if (grid->bc[bc].reduce == PETSC_FALSE) {
1243:       GridSetVecBoundary(grid->bc[bc].boundary, grid->bc[bc].field, PointFunctionZero, v, ctx);
1244:     }
1245:   }
1246:   for(bc = 0; bc < grid->numPointBC; bc++) {
1247:     if (grid->pointBC[bc].reduce == PETSC_FALSE) {
1248:       GridSetVecPointBoundary(grid->pointBC[bc].node, grid->pointBC[bc].field, PointFunctionZero, v, ctx);
1249:     }
1250:   }
1251:   return(0);
1252: }

1254: /*@
1255:    GVecSetBoundaryDifference - Applies the specified Dirichlet boundary conditions
1256:    to this vector, but uses the difference of the value in the given vector and the
1257:    computed value.

1259:    Input Parameters:
1260: .  v   - The grid vector
1261: .  u   - A grid vector, usually the previous solution
1262: .  ctx - An optional user provided context for the function

1264:   Level: intermediate

1266: .seealso: GridSetVecBoundaryDifference(), GridSetBC(), GridAddBC()
1267: @*/
1268: int GVecSetBoundaryDifference(GVec v, GVec u, void *ctx)
1269: {
1270:   Grid grid;
1271:   int  bc;
1272:   int  ierr;

1277:   GVecGetGrid(v, &grid);
1278:   for(bc = 0; bc < grid->numBC; bc++) {
1279:     if (grid->bc[bc].reduce == PETSC_FALSE) {
1280:       GridSetVecBoundaryDifference(grid->bc[bc].boundary, grid->bc[bc].field, u, grid->bc[bc].func, v, ctx);
1281:     }
1282:   }
1283:   for(bc = 0; bc < grid->numPointBC; bc++) {
1284:     if (grid->pointBC[bc].reduce == PETSC_FALSE) {
1285:       GridSetVecPointBoundaryDifference(grid->pointBC[bc].node, grid->pointBC[bc].field, u, grid->pointBC[bc].func, v, ctx);
1286:     }
1287:   }
1288:   return(0);
1289: }

1291: /*@
1292:   GVecInterpolateField - Interpolates the field defined by the grid
1293:   vector to the specified point, and stores the components in the
1294:   array provided.

1296:   Input Parameters:
1297: . v      - The grid vector
1298: . field  - The field to interpolate
1299: . x,y,z  - The interpolation point
1300: . ctx    - An InterpolationContext

1301:   Output Parameter:
1302: . values - The interpolated field at the point

1304:   Level: intermediate

1306: .keywords: grid vector, interpolation
1307: .seealso: GVecEvaluateFunction, GVecEvaluateFunctionGalerkin
1308: @*/
1309: int GVecInterpolateField(GVec v, int field, double x, double y, double z, PetscScalar *values, InterpolationContext *ctx)
1310: {
1311:   Grid           grid;
1312:   Discretization disc;
1313:   int           *elemStart;
1314:   ElementVec     vec;
1315:   PetscScalar   *array;
1316:   double         coords[3];
1317:   double        *tmp;
1318:   MPI_Status     status;
1319:   PetscTruth     lost;
1320:   /*double         lostDist, lostX, lostY, lostZ;*/
1321:   int            numFound, numSent;
1322:   int            comp, elem, newElem, closeNode;
1323:   int            proc, var;
1324:   int            ierr;

1328:   ierr      = GVecGetGrid(v, &grid);
1329:   PetscLogEventBegin(GVEC_InterpolateField, v, grid, 0, 0);
1330:   disc      = grid->fields[field].disc;
1331:   comp      = disc->comp;
1332:   elemStart = grid->locOrder->elemStart;
1333:   vec       = grid->ghostElementVec;
1334:   array     = &vec->array[elemStart[field]];

1336:   /* Watch out for null collective call */
1337:   if (values == PETSC_NULL) {
1338:     /* Mark point as inactive */
1339:     elem = -2;
1340:   } else {
1341:     /* Locate the point in the mesh */
1342:     MeshLocatePoint(ctx->mesh, x, y, 0.0, &elem);
1343:   }

1345:   if (elem >= 0) {
1346:     /* Initialize element vector */
1347:     ElementVecZero(vec);
1348:     vec->reduceSize = grid->locOrder->elemSize;
1349:     /* Get indices for the old element */
1350:     GridCalcInterpolationElementVecIndices(grid, ctx->mesh, elem, ctx->order, vec);
1351:     /* Get values from local vector to element vector */
1352:     GridLocalToElementGeneral(grid, ctx->ghostVec, grid->bdReduceVecCur, grid->reduceSystem, grid->reduceElement, vec);
1353:     if (grid->explicitConstraints == PETSC_TRUE) {
1354:       /* Must transform to unconstrained variables for element integrals */
1355:       GridProjectInterpolationElementVec(grid, ctx->mesh, elem, ctx->order, PETSC_FALSE, vec);
1356:     }
1357:     /* Interpolate data to get field at new point */
1358:     DiscretizationInterpolateField(disc, ctx->mesh, elem, x, y, 0.0, array, values, INTERPOLATION_LOCAL);
1359:   } else if (elem != -2) {
1360:     if (ctx->numProcs == 1) {
1361:       SETERRQ2(PETSC_ERR_ARG_WRONG, "Node (%g,%g) not found in new mesh", x, y);
1362:     } else if (ctx->batchMode == PETSC_TRUE) {
1363:       if (ctx->numPoints[ctx->rank] >= ctx->maxPoints) {
1364:         PetscMalloc(ctx->maxPoints*6 * sizeof(double), &tmp);
1365:         PetscMemcpy(tmp, ctx->coords, ctx->maxPoints*3 * sizeof(double));
1366:         PetscFree(ctx->coords);
1367:         ctx->maxPoints *= 2;
1368:         ctx->coords     = tmp;
1369:       }
1370:       ctx->coords[ctx->numPoints[ctx->rank]*3]   = x;
1371:       ctx->coords[ctx->numPoints[ctx->rank]*3+1] = y;
1372:       ctx->coords[ctx->numPoints[ctx->rank]*3+2] = z;
1373:       ctx->numPoints[ctx->rank]++;
1374:       for(var = 0; var < comp; var++)
1375:         values[var] = 0.0;
1376:     }
1377:   }

1379:   if ((ctx->numProcs > 1) && (ctx->batchMode == PETSC_FALSE)) {
1380:     /* Communicate missing nodes */
1381:     MPI_Allgather(&elem, 1, MPI_INT, ctx->activeProcs, 1, MPI_INT, grid->comm);
1382:     coords[0] = x; coords[1] = y; coords[2] = z;
1383:     MPI_Allgather(coords, 3, MPI_DOUBLE, ctx->coords, 3, MPI_DOUBLE, grid->comm);

1385:     /* Handle each point */
1386:     numFound = 0;
1387:     for(proc = 0; proc < ctx->numProcs; proc++) {
1388:       /* If the point was not located by someone else */
1389:       if ((ctx->activeProcs[proc] == -1) && (proc != ctx->rank)) {
1390:         MeshLocatePoint(ctx->mesh, ctx->coords[proc*3], ctx->coords[proc*3+1], 0.0, &newElem);
1391:         if (newElem >= 0) {
1392:           /* Mark point as found by this processor */
1393:           ctx->activeProcs[proc] = ctx->rank;
1394:           /* Get indices for the old element */
1395:           GridCalcInterpolationElementVecIndices(grid, ctx->mesh, newElem, ctx->order, vec);
1396:           /* Get values from local vector to element vector */
1397:           GridLocalToElementGeneral(grid, ctx->ghostVec, grid->bdReduceVecCur, grid->reduceSystem, grid->reduceElement, vec);
1398:           if (grid->explicitConstraints == PETSC_TRUE) {
1399:             /* Must transform to unconstrained variables for element integrals */
1400:             GridProjectInterpolationElementVec(grid, ctx->mesh, newElem, ctx->order, PETSC_FALSE, vec);
1401:           }
1402:           /* Interpolate data to get field at new point */
1403:           DiscretizationInterpolateField(disc, ctx->mesh, newElem, ctx->coords[proc*3], ctx->coords[proc*3+1], 0.0,
1404:                                                 array, &ctx->values[proc*comp], INTERPOLATION_LOCAL);
1405: 
1406:           numFound++;
1407:         }
1408:       } else {
1409:         /* Mark point as inactive */
1410:         ctx->activeProcs[proc] = -2;
1411:       }
1412:     }

1414:     /* Extra step to handle nodes which slip outside the boundary */
1415:     MPI_Allreduce(ctx->activeProcs, ctx->calcProcs, ctx->numProcs, MPI_INT, MPI_MAX, grid->comm);
1416:     lost = PETSC_FALSE;
1417:     for(proc = 0; proc < ctx->numProcs; proc++) {
1418:       if (ctx->calcProcs[proc] == -1) {
1419:         /* PetscPrintf(grid->comm, "Lost point:n ");
1420:         for(var = 0; var < ctx->numProcs; var++) PetscPrintf(grid->comm, " aProc[%d]: %d", var, ctx->activeProcs[var]);
1421:         PetscPrintf(grid->comm, "n");
1422:         for(var = 0; var < ctx->numProcs; var++) PetscPrintf(grid->comm, " cProc[%d]: %d", var, ctx->calcProcs[var]);
1423:         PetscPrintf(grid->comm, "n");
1424:         PetscPrintf(grid->comm, "  (%g, %g)n", ctx->coords[proc*3], ctx->coords[proc*3+1]);*/
1425:         lost = PETSC_TRUE;
1426:         /* No one found it so search for the closest element */
1427:         MeshGetNearestNode(ctx->mesh, ctx->coords[proc*3], ctx->coords[proc*3+1], 0.0, PETSC_TRUE, &closeNode);
1428:         if (closeNode >= 0) {
1429:           MeshGetElementFromNode(ctx->mesh, closeNode, &newElem);
1430:           /* PetscPrintf(PETSC_COMM_SELF, "  found lost point on processor %d in element %d\n", ctx->rank, newElem);
1431:           MeshGetNodeCoords(ctx->mesh, closeNode, &lostX, &lostY, &lostZ);
1432:           lostDist = PetscSqr(MeshPeriodicDiffX(ctx->mesh, lostX - ctx->coords[proc*3])) +
1433:             PetscSqr(MeshPeriodicDiffY(ctx->mesh, lostY - ctx->coords[proc*3+1]));
1434:           PetscPrintf(PETSC_COMM_SELF, "  at (%g, %g), it was %g away from the point\n", lostX, lostY, lostDist); */
1435:           /* Mark point as found by this processor */
1436:           ctx->activeProcs[proc] = ctx->rank;
1437:           /* Get indices for the old element */
1438:           GridCalcInterpolationElementVecIndices(grid, ctx->mesh, newElem, ctx->order, vec);
1439:           /* Get values from local vector to element vector */
1440:           GridLocalToElementGeneral(grid, ctx->ghostVec, grid->bdReduceVecCur, grid->reduceSystem, grid->reduceElement, vec);
1441:           if (grid->explicitConstraints == PETSC_TRUE) {
1442:             /* Must transform to unconstrained variables for element integrals */
1443:             GridProjectInterpolationElementVec(grid, ctx->mesh, newElem, ctx->order, PETSC_FALSE, vec);
1444:           }
1445:           /* Interpolate data to get field at new point */
1446:           DiscretizationInterpolateField(disc, ctx->mesh, newElem, ctx->coords[proc*3], ctx->coords[proc*3+1], 0.0,
1447:                                                 array, &ctx->values[proc*comp], INTERPOLATION_LOCAL);
1448: 
1449:           numFound++;
1450:         }
1451:       }
1452:     }

1454:     /* Communicate interpolated values */
1455:     if (lost == PETSC_TRUE) {
1456:       MPI_Allreduce(ctx->activeProcs, ctx->calcProcs, ctx->numProcs, MPI_INT, MPI_MAX, grid->comm);
1457:       /* PetscPrintf(grid->comm, "Recommunicated interpolated valuesn ");
1458:       for(var = 0; var < ctx->numProcs; var++) PetscPrintf(grid->comm, " aProc[%d]: %d", var, ctx->activeProcs[var]);
1459:       PetscPrintf(grid->comm, "n");
1460:       for(var = 0; var < ctx->numProcs; var++) PetscPrintf(grid->comm, " cProc[%d]: %d", var, ctx->calcProcs[var]);
1461:       PetscPrintf(grid->comm, "n"); */
1462:     }
1463:     numSent = 0;
1464:     for(proc = 0; proc < ctx->numProcs; proc++) {
1465:       if ((elem == -1) && (ctx->rank == proc)) {
1466:         /* Point from this domain was not found */
1467:         if (ctx->calcProcs[proc] < 0) {
1468:           SETERRQ2(PETSC_ERR_ARG_WRONG, "Node not found at (%g,%g)", x, y);
1469:         }
1470:         /* Must check for lost points found in the same domain */
1471:         if (ctx->calcProcs[proc] != ctx->rank) {
1472:           MPI_Recv(values, comp, MPI_DOUBLE, ctx->calcProcs[proc], 0, grid->comm, &status);
1473:         } else {
1474:           for(var = 0; var < comp; var++) values[var] = ctx->values[proc*comp+var];
1475:           numSent++;
1476:         }
1477: #if 0
1478:         PetscPrintf(PETSC_COMM_SELF, "Immediate point %d on proc %d\n", ctx->curPoint++, ctx->rank);
1479:         PetscPrintf(PETSC_COMM_SELF, "  x: %g y: %g z: %g\n  ", ctx->coords[proc*3], ctx->coords[proc*3+1], ctx->coords[proc*3+2]);
1480:         for(c = 0; c < comp; c++) {
1481:           PetscPrintf(PETSC_COMM_SELF, "val[%d]: %g ", c, values[c]);
1482:         }
1483:         PetscPrintf(PETSC_COMM_SELF, "\n");
1484: #endif
1485:       } else if (ctx->rank == ctx->calcProcs[proc]) {
1486:         /* Point from another domain was found here */
1487:         MPI_Send(&ctx->values[proc*comp], comp, MPI_DOUBLE, proc, 0, grid->comm);
1488:         numSent++;
1489:       } else if (ctx->rank == ctx->activeProcs[proc]) {
1490:         /* Point was found by multiple processors */
1491:         if (ctx->calcProcs[proc] < ctx->rank) {
1492:           SETERRQ(PETSC_ERR_ARG_CORRUPT, "Invalid precedence during interpolation");
1493:         }
1494:         numSent++;
1495:       }
1496:     }
1497:     if (numSent != numFound) {
1498:       SETERRQ2(PETSC_ERR_ARG_CORRUPT, "Number of nodes found %d does not match number sent %d", numFound, numSent);
1499:     }
1500:     /* if (lost == PETSC_TRUE) {
1501:       PetscBarrier((PetscObject) grid);
1502:       PetscPrintf(grid->comm, "Finished interpolation after lost point\n");
1503:     }*/
1504:   }

1506:   PetscLogEventEnd(GVEC_InterpolateField, v, grid, 0, 0);
1507:   return(0);
1508: }

1510: int GVecInterpolateFieldBatch(GVec v, int field, InterpolationContext *ctx)
1511: {
1512:   Grid           grid;
1513:   Discretization disc;
1514:   int            comp;
1515:   ElementVec     vec;
1516:   int            numProcs = ctx->numProcs;
1517:   int            rank     = ctx->rank;
1518:   int           *numCoords, *cOffsets;
1519:   int           *activePoints, *calcPoints;
1520:   double        *coords;
1521:   PetscScalar   *values, *array;
1522:   int            totPoints;
1523:   int            numFound;
1524:   int            proc, p, elem;
1525:   int            ierr;

1528:   if ((ctx->numProcs == 1) || (ctx->batchMode == PETSC_FALSE))
1529:     return(0);

1532:   ierr  = GVecGetGrid(v, &grid);
1533:   PetscLogEventBegin(GVEC_InterpolateFieldBatch, v, grid, 0, 0);
1534:   disc  = grid->fields[field].disc;
1535:   comp  = disc->comp;
1536:   vec   = grid->ghostElementVec;
1537:   array = &vec->array[grid->locOrder->elemStart[field]];
1538:   /* Get the number of points from each domain */
1539:   PetscMalloc((numProcs+1) * sizeof(int), &numCoords);
1540:   PetscMalloc((numProcs+1) * sizeof(int), &cOffsets);
1541:   PetscLogEventBegin(GVEC_InterpolateFieldBatchParallel, v, grid, 0, 0);
1542:   MPI_Allgather(&ctx->numPoints[rank], 1, MPI_INT, &ctx->numPoints[1], 1, MPI_INT, v->comm);
1543:   PetscLogEventEnd(GVEC_InterpolateFieldBatchParallel, v, grid, 0, 0);
1544:   /* Get the coordinates of each point */
1545:   ctx->numPoints[0] = 0;
1546:   cOffsets[0]       = 0;
1547:   for(proc = 0; proc < numProcs; proc++) {
1548:     numCoords[proc]         = ctx->numPoints[proc+1]*3;
1549:     ctx->numPoints[proc+1] += ctx->numPoints[proc];
1550:     cOffsets[proc+1]        = ctx->numPoints[proc+1]*3;
1551:   }
1552:   totPoints = ctx->numPoints[numProcs];
1553:   PetscMalloc(totPoints*3 * sizeof(double), &coords);
1554:   PetscLogEventBegin(GVEC_InterpolateFieldBatchParallel, v, grid, 0, 0);
1555:   MPI_Allgatherv(ctx->coords, numCoords[rank], MPI_DOUBLE, coords, numCoords, cOffsets, MPI_DOUBLE, v->comm);
1556:   PetscLogEventEnd(GVEC_InterpolateFieldBatchParallel, v, grid, 0, 0);

1558:   PetscMalloc(totPoints      * sizeof(int),         &activePoints);
1559:   PetscMalloc(totPoints      * sizeof(int),         &calcPoints);
1560:   PetscMalloc(totPoints*comp * sizeof(PetscScalar), &values);
1561:   PetscMalloc(totPoints*comp * sizeof(PetscScalar), &ctx->values);
1562:   PetscMemzero(values, totPoints*comp * sizeof(PetscScalar));
1563:   PetscLogEventBegin(GVEC_InterpolateFieldBatchCalc, v, grid, 0, 0);
1564:   /* Mark this domain's points as inactive */
1565:   for(p = 0; p < totPoints; p++) {
1566:     if ((p >= ctx->numPoints[rank]) && (p < ctx->numPoints[rank+1]))
1567:       activePoints[p] = -2;
1568:     else
1569:       activePoints[p] = -1;
1570:   }
1571:   /* Handle each point */
1572:   for(p = 0, numFound = 0; p < totPoints; p++)
1573:   {
1574:     if (activePoints[p] > -2) {
1575:       MeshLocatePoint(ctx->mesh, coords[p*3], coords[p*3+1], coords[p*3+2], &elem);
1576:       if (elem >= 0) {
1577:         /* Mark point as found by this processor */
1578:         activePoints[p] = rank;
1579:         /* Get indices for the old element */
1580:         GridCalcInterpolationElementVecIndices(grid, ctx->mesh, elem, ctx->order, vec);
1581:         /* Get values from local vector to element vector */
1582:         GridLocalToElementGeneral(grid, ctx->ghostVec, grid->bdReduceVecCur, grid->reduceSystem, grid->reduceElement, vec);
1583:         if (grid->explicitConstraints == PETSC_TRUE) {
1584:           /* Must transform to unconstrained variables for element integrals */
1585:           GridProjectInterpolationElementVec(grid, ctx->mesh, elem, ctx->order, PETSC_FALSE, vec);
1586:         }
1587:         /* Interpolate data to get field at new point */
1588:         DiscretizationInterpolateField(disc, ctx->mesh, elem, coords[p*3], coords[p*3+1], coords[p*3+2],
1589:                                               array, &values[p*comp], INTERPOLATION_LOCAL);
1590: 
1591:         numFound++;
1592:       }
1593:     }
1594:   }
1595:   PetscLogEventEnd(GVEC_InterpolateFieldBatchCalc, v, grid, 0, 0);

1597:   /* Communicate interpolated values */
1598: #if 1
1599:   /* This is REALLY bad but probably better than what is there now */
1600:   PetscLogEventBegin(GVEC_InterpolateFieldBatchParallel, v, grid, 0, 0);
1601:   MPI_Allreduce(values, ctx->values, totPoints*comp, MPI_DOUBLE, MPI_SUM, v->comm);
1602:   PetscLogEventEnd(GVEC_InterpolateFieldBatchParallel, v, grid, 0, 0);
1603: #else
1604:   MPI_Allreduce(activePoints, calcPoints, totPoints, MPI_INT, MPI_MAX, v->comm);
1605:   for(proc = 0, numSent = 0; proc < numProcs; proc++)
1606:     for(p = ctx->numPoints[proc]; p < ctx->numPoints[proc+1]; p++)
1607:     {
1608:       if (rank == proc)
1609:       {
1610:         /* Point from this domain was not found */
1611:         if (calcPoints[p] < 0) {
1612:           PetscLogInfo(v, "[%d]x: %g y: %g z: %g\n", rank, coords[p*3], coords[p*3+1], coords[p*3+2]);
1613:           SETERRQ(PETSC_ERR_PLIB, "Node not found");
1614:         }
1615:         MPI_Recv(&ctx->values[p*comp], comp, MPI_DOUBLE, calcPoints[p], 0, v->comm, &status);
1616:       } else if (rank == calcPoints[p]) {
1617:         /* Point from another domain was found here */
1618:         MPI_Send(&values[p*comp], comp, MPI_DOUBLE, proc, 0, v->comm);
1619:         numSent++;
1620:       } else if (rank == activePoints[p]) {
1621:         /* Point was found by multiple processors */
1622:         if (calcPoints[p] < rank) SETERRQ(PETSC_ERR_ARG_CORRUPT, "Invalid precedence during interpolation");
1623:         numSent++;
1624:       }
1625:     }
1626:   if (numSent != numFound) SETERRQ(PETSC_ERR_ARG_CORRUPT, "Number of nodes found does not match number sent");
1627: #endif
1628:   PetscFree(numCoords);
1629:   PetscFree(activePoints);
1630:   PetscFree(calcPoints);
1631:   PetscFree(values);
1632:   PetscLogEventEnd(GVEC_InterpolateFieldBatch, v, grid, 0, 0);
1633:   return(0);
1634: }
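
/*
  Illustrative sketch (not part of the original routine above): the counts/offsets
  bookkeeping pattern used with MPI_Allgather()/MPI_Allgatherv() to collect three
  coordinates per point from every process. The function name GatherAllCoordinates
  and its arguments are placeholders for this example only; error checking is
  omitted for brevity.
*/
static int GatherAllCoordinates(MPI_Comm comm, int numLocalPoints, double *localCoords, double **allCoords, int *totPoints)
{
  int     numProcs, proc;
  int    *numCoords, *cOffsets;
  double *coords;

  MPI_Comm_size(comm, &numProcs);
  PetscMalloc(numProcs     * sizeof(int), &numCoords);
  PetscMalloc((numProcs+1) * sizeof(int), &cOffsets);
  /* Each process announces how many points it holds */
  MPI_Allgather(&numLocalPoints, 1, MPI_INT, numCoords, 1, MPI_INT, comm);
  /* Convert point counts to coordinate counts (3 doubles per point) and running offsets */
  cOffsets[0] = 0;
  for(proc = 0; proc < numProcs; proc++) {
    numCoords[proc] *= 3;
    cOffsets[proc+1] = cOffsets[proc] + numCoords[proc];
  }
  *totPoints = cOffsets[numProcs]/3;
  PetscMalloc(cOffsets[numProcs] * sizeof(double), &coords);
  /* Gather every process's coordinate block into one global array */
  MPI_Allgatherv(localCoords, numLocalPoints*3, MPI_DOUBLE, coords, numCoords, cOffsets, MPI_DOUBLE, comm);
  PetscFree(numCoords);
  PetscFree(cOffsets);
  *allCoords = coords;
  return(0);
}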

1636: /*@
1637:   GVecCreate - Creates a grid vector associated with a particular grid.

1639:   Input Parameter:
1640: . grid - The grid defining the discrete function

1642:   Output Parameter:
1643: . gvec - The resulting grid vector

1645:   Level: beginner

1647: .seealso: GVecCreateConstrained()
1648: @*/
1649: int GVecCreate(Grid grid, GVec *gvec)
1650: {

1656:   GridSetUp(grid);
1657:   if (ierr) SETERRQ(PETSC_ERR_PLIB, "Grid setup failed");
1658:   GVecCreateRectangular(grid, grid->order, gvec);
1659:   return(0);
1660: }
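
/*
  Illustrative sketch: typical use of GVecCreate(), assuming a Grid object `grid`
  has already been created and configured elsewhere. The routine name and the
  "Solution" title are placeholders for this example only; error checking is
  omitted for brevity.
*/
static int GVecCreateUsageExample(Grid grid)
{
  GVec u;

  GVecCreate(grid, &u);
  GVecViewFromOptions(u, "Solution");
  GVecDestroy(u);
  return(0);
}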

1662: /*@
1663:   GVecCreateGhost - Creates a grid vector associated with a particular grid
1664:   with ghost padding on each processor.

1666:   Input Parameter:
1667: . grid - The grid defining the discrete function

1669:   Output Parameter:
1670: . gvec - The resulting grid vector

1672:   Level: beginner

1674: .seealso: GVecCreate(), GVecCreateConstrained()
1675: @*/
1676: int GVecCreateGhost(Grid grid, GVec *gvec)
1677: {

1683:   GridSetUp(grid);
1684:   if (ierr) SETERRQ(PETSC_ERR_PLIB, "Grid setup failed");
1685:   GVecCreateRectangularGhost(grid, grid->order, gvec);
1686:   return(0);
1687: }

1689: /*@
1690:   GVecCreateRectangular - Creates a grid vector associated with a particular grid and ordering.

1692:   Input Parameters:
1693: . grid  - The grid defining the discrete function
1694: . order - The variable ordering

1696:   Output Parameter:
1697: . gvec - The resulting grid vector

1699:   Level: beginner

1701: .seealso: GVecCreate(), GVecCreateConstrained()
1702: @*/
1703: int GVecCreateRectangular(Grid grid, VarOrdering order, GVec *gvec)
1704: {

1711:   GridSetUp(grid);
1712:   if (ierr) SETERRQ(PETSC_ERR_PLIB, "Grid setup failed");
1713:   VecCreateMPI(grid->comm, order->numLocVars, order->numVars, gvec);
1714:   PetscObjectCompose((PetscObject) *gvec, "Grid",  (PetscObject) grid);
1715:   PetscObjectCompose((PetscObject) *gvec, "Order", (PetscObject) order);
1716:   return(0);
1717: }
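
/*
  Illustrative sketch: the Grid and VarOrdering composed onto the vector above can
  be recovered later with PetscObjectQuery(), the companion of PetscObjectCompose().
  A minimal accessor, assuming the vector was created by GVecCreateRectangular();
  the function name is a placeholder for this example only.
*/
static int GVecGetGridAndOrdering(GVec gvec, Grid *grid, VarOrdering *order)
{
  PetscObjectQuery((PetscObject) gvec, "Grid",  (PetscObject *) grid);
  PetscObjectQuery((PetscObject) gvec, "Order", (PetscObject *) order);
  return(0);
}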

1719: /*@
1720:   GVecCreateRectangularGhost - Creates a grid vector associated with a particular grid
1721:   and ordering, adding ghost padding on each processor.

1723:   Input Parameters:
1724: . grid  - The grid defining the discrete function
1725: . order - The variable ordering

1727:   Output Parameter:
1728: . gvec - The resulting grid vector

1730:   Level: beginner

1732: .seealso: GVecCreate(), GVecCreateConstrained()
1733: @*/
1734: int GVecCreateRectangularGhost(Grid grid, VarOrdering order, GVec *gvec)
1735: {

1742:   GridSetUp(grid);
1743:   if (ierr) SETERRQ(PETSC_ERR_PLIB, "Grid setup failed");
1744:   /* I should use MPICreateGhost(), but it is in a state of flux */
1745:   VecCreate(grid->comm, gvec);
1746:   VecSetSizes(*gvec, order->numLocVars, order->numVars);
1747:   VecCreate_MPI_Private(*gvec, order->numOverlapVars-order->numLocVars, PETSC_NULL, PETSC_NULL);
1748:   PetscObjectCompose((PetscObject) *gvec, "Grid",  (PetscObject) grid);
1749:   PetscObjectCompose((PetscObject) *gvec, "Order", (PetscObject) order);
1750:   return(0);
1751: }
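
/*
  Illustrative sketch: the sizing used above. Each process owns order->numLocVars
  entries, and order->numOverlapVars - order->numLocVars ghost slots are appended
  to its local storage. A minimal helper, assuming a fully set up VarOrdering; the
  function name is a placeholder for this example only.
*/
static int VarOrderingGetGhostCount(VarOrdering order, int *numGhosts)
{
  *numGhosts = order->numOverlapVars - order->numLocVars;
  return(0);
}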

1753: /*@
1754:   GVecCreateConstrained - Creates a grid vector associated with a
1755:   particular grid, but after constraints have been applied

1757:   Input Parameter:
1758: . grid - The grid defining the discrete function

1760:   Output Parameter:
1761: . gvec - The resulting grid vector

1763:   Level: beginner

1765: .seealso: GVecCreate()
1766: @*/
1767: int GVecCreateConstrained(Grid grid, GVec *gvec)
1768: {

1774:   GridSetUp(grid);
1775:   if (ierr) SETERRQ(PETSC_ERR_PLIB, "Grid setup failed");
1776:   if (grid->isConstrained) {
1777:     GVecCreateRectangular(grid, grid->constraintOrder, gvec);
1778:   } else {
1779:     GVecCreate(grid, gvec);
1780:   }
1781:   return(0);
1782: }

1784: /*@
1785:   GVecCreateBoundaryRestriction - Creates a grid vector associated with
1786:   the boundary variables of a particular grid.

1788:   Input Parameter:
1789: . grid - The grid defining the discrete function

1791:   Output Parameter:
1792: . gvec - The resulting grid vector

1794:   Level: beginner

1796: .seealso: GVecCreate(), GVecCreateConstrained()
1797: @*/
1798: int GVecCreateBoundaryRestriction(Grid grid, GVec *gvec)
1799: {

1805:   GridSetUp(grid);
1806:   if (ierr) SETERRQ(PETSC_ERR_PLIB, "Grid setup failed");
1807:   GridSetupBoundary(grid);
1808:   GVecCreateRectangular(grid, grid->bdOrder, gvec);
1809:   return(0);
1810: }

1812: /*@
1813:   PointFunctionInterpolateField - A PointFunction that gives the field value at a given
1814:   point in the mesh based upon a grid vector representation.

1816:   Input Parameters:
1817: . n      - The number of points
1818: . comp   - The number of components
1819: . x,y,z  - The coordinates of points
1820: . ctx    - An InterpolationContext

1822:   Output Parameter:
1823: . values - The location where the field values are stored

1825:   Level: advanced

1827: .keywords: point function, grid
1828: .seealso: PointFunctionOne(), PointFunctionZero(), PointFunctionConstant()
1829: @*/
1830: int PointFunctionInterpolateField(int n, int comp, double *x, double *y, double *z, PetscScalar *values, void *ctx) {
1831:   InterpolationContext *iCtx = (InterpolationContext *) ctx;
1832:   double               *px   = x;
1833:   double               *py   = y;
1834:   double               *pz   = z;
1835:   int                   i;
1836:   int                   ierr;

1839:   /* Check for a call purely for collective operations */
1840:   if (n == 0) {
1841:     GVecInterpolateField(iCtx->vec, iCtx->field, 0.0, 0.0, 0.0, PETSC_NULL, iCtx);
1842:     return(0);
1843:   }

1845:   /* We might do better by assuming that any two of these could be null */
1846:   if (z == PETSC_NULL) pz = px;
1847:   if (y == PETSC_NULL) py = px;

1849:   for(i = 0; i < n; i++) {
1850:     GVecInterpolateField(iCtx->vec, iCtx->field, px[i], py[i], pz[i], &values[i*comp], iCtx);
1851:   }
1852:   return(0);
1853: }
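
/*
  Illustrative sketch: invoking the PointFunction above directly for two sample
  points, assuming an InterpolationContext `ictx` whose vec, field, and mesh
  members have already been filled in. The routine name and the sample coordinates
  are placeholders for this example only; error checking is omitted for brevity.
*/
static int PointFunctionInterpolateFieldExample(InterpolationContext *ictx, int comp)
{
  double       x[2] = {0.0, 0.5};
  double       y[2] = {0.0, 0.5};
  double       z[2] = {0.0, 0.0};
  PetscScalar *values;

  PetscMalloc(2*comp * sizeof(PetscScalar), &values);
  PointFunctionInterpolateField(2, comp, x, y, z, values, (void *) ictx);
  PetscFree(values);
  return(0);
}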

1855: /*@
1856:   PointFunctionInterpolateFieldBatch - A PointFunction that finishes an interpolation
1857:   by setting the off-processor values stored in the InterpolationContext.

1859:   Input Parameters:
1860: . n      - The number of points
1861: . comp   - The number of components
1862: . x,y,z  - The coordinates of points
1863: . ctx    - An InterpolationContext

1865:   Output Parameter:
1866: . values - The location where the field values are stored

1868:   Level: advanced

1870: .keywords: point function, grid
1871: .seealso: PointFunctionOne(), PointFunctionZero(), PointFunctionConstant()
1872: @*/
1873: int PointFunctionInterpolateFieldBatch(int n, int comp, double *x, double *y, double *z, PetscScalar *values, void *ctx) {
1874:   InterpolationContext *iCtx = (InterpolationContext *) ctx;
1875:   double               *px   = x;
1876:   double               *py   = y;
1877:   double               *pz   = z;
1878:   int                   p    = iCtx->curPoint - iCtx->numPoints[iCtx->rank];
1879:   int                   i, c;

1882:   /* We might do better by assuming that any two of these could be null */
1883:   if (z == PETSC_NULL) pz = px;
1884:   if (y == PETSC_NULL) py = px;

1886:   for(i = 0; i < n; i++) {
1887:     if ((px[i] == iCtx->coords[p*3])   &&
1888:         (py[i] == iCtx->coords[p*3+1]) &&
1889:         (pz[i] == iCtx->coords[p*3+2])) {
1890:       /* Copy stored values */
1891: #if 0
1892:       PetscPrintf(PETSC_COMM_SELF, "Batch point %d on proc %dn", iCtx->curPoint, iCtx->rank);
1893:       PetscPrintf(PETSC_COMM_SELF, "  x: %g y: %g z: %gn  ", px[i], py[i], pz[i]);
1894:       for(c = 0; c < comp; c++) {
1895:         values[i*comp+c] = iCtx->values[iCtx->curPoint*comp+c];
1896:         PetscPrintf(PETSC_COMM_SELF, "val[%d]: %g ", c, values[i*comp+c]);
1897:       }
1898:       PetscPrintf(PETSC_COMM_SELF, "n");
1899: #else
1900:       for(c = 0; c < comp; c++) values[i*comp+c] = iCtx->values[iCtx->curPoint*comp+c];
1901: #endif
1902:       iCtx->curPoint++;
1903:       p++;
1904:     }
1905:   }
1906:   return(0);
1907: }