Actual source code: gmat2d.c
#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: gmat2d.c,v 1.24 2000/01/31 17:34:32 knepley Exp $";
#endif

/* Implements FE matrices derived from 2d triangular grids */
#include "petscsles.h"   /* For ALE Operators */
#include "src/gvec/gvecimpl.h" /*I "gvec.h" I*/
#include "src/mesh/impls/triangular/triimpl.h"
#include "src/grid/impls/triangular/2d/elemvec2d.h"
#include "gmat2d.h"

extern int GridResetConstrainedMultiply_Private(Grid, GMat);

static int PlaceVariables_Private(int startVar, int nodeVars, int locColStart, int locColEnd, int sStartVar, int sNodeVars,
                                  PetscTruth rectangular, int *diagRows, int *offdiagRows)
{
  int var;

  if ((nodeVars == 0) || (sNodeVars == 0)) return(0);
  /* Check to see whether the variables fall within the diagonal block */
  if ((sStartVar + sNodeVars <= locColStart) || (sStartVar >= locColEnd)) {
    for(var = 0; var < nodeVars; var++) {
      offdiagRows[startVar+var] += sNodeVars;
    }
  } else if ((sStartVar >= locColStart) && (sStartVar + sNodeVars <= locColEnd)) {
    for(var = 0; var < nodeVars; var++) {
      diagRows[startVar+var] += sNodeVars;
    }
  } else if (rectangular) {
    /* Allow cuts on a single node for rectangular matrices */
    if (sStartVar < locColStart) {
      /* Cut is from below */
      for(var = 0; var < nodeVars; var++) {
        diagRows[startVar+var]    += (sStartVar + sNodeVars) - locColStart;
        offdiagRows[startVar+var] += locColStart - sStartVar;
      }
    } else {
      /* Cut is from above */
      for(var = 0; var < nodeVars; var++) {
        diagRows[startVar+var]    += locColEnd - sStartVar;
        offdiagRows[startVar+var] += (sStartVar + sNodeVars) - locColEnd;
      }
    }
  } else {
    /* Row blocking cuts variables on a single node. This is bad partitioning. */
    SETERRQ(PETSC_ERR_ARG_WRONG, "Row blocking cut variables on a single node");
  }
  return(0);
}
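
/* Worked example (illustrative only; the GMAT2D_EXAMPLE guard and this helper are
   hypothetical, not part of the library): suppose this process owns the column
   block [100,200) and a support node contributes 5 columns starting at global
   column 150. Since [150,155) lies entirely inside [100,200), all five couplings
   are counted in the diagonal block for each of the node's rows. */
#ifdef GMAT2D_EXAMPLE
static int PlaceVariablesExample(void)
{
  int diagRows[3]    = {0, 0, 0};
  int offdiagRows[3] = {0, 0, 0};
  int ierr;

  /* 3 rows starting at local row 0; local columns [100,200); support columns [150,155) */
  ierr = PlaceVariables_Private(0, 3, 100, 200, 150, 5, PETSC_FALSE, diagRows, offdiagRows);
  /* Now diagRows[var] == 5 and offdiagRows[var] == 0 for var = 0, 1, 2 */
  return(ierr);
}
#endif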

int GridCreateGMat_Triangular_2D(Grid grid, VarOrdering sOrder, VarOrdering tOrder, PetscTruth bdCols, GMat *gmat)
{
  MPI_Comm    comm;
  Mesh        mesh;
  Partition   part;
  int         locRowStart;    /* The row that this partition starts on */
  int         locRowEnd;      /* The row that the next partition starts on */
  int         locColStart;    /* The column that this partition starts on */
  int         locColEnd;      /* The column that the next partition starts on */
  int         newLocColStart; /* The column that the new variable domain starts on */
  int         newLocColEnd;   /* The column after the new variable domain ends */
  int        *diagRows;       /* Number of nonzeros in each diagonal portion */
  int        *offdiagRows;    /* Number of nonzeros in each off-diagonal portion */
  int         nodeVars;       /* Number of variables on a node */
  int         newNodeVars;    /* Number of new variables on a node */
  int         sNodeVars;      /* Number of variables on a node in the support of a given node */
  int         sNewNodeVars;   /* Number of new variables on a node in the support of a given node */
  int         startVar;       /* First variable on a node */
  int         newStartVar;    /* First new variable on a node */
  int         sStartVar;      /* First variable on a support node (global numbering) */
  int         sNewStartVar;   /* First new variable on a support node (global numbering) */
  int        *nodeDone;       /* A 1 indicates that the node has already been processed */
  int        *nodeNeighbors;  /* A list of the nodes in the support of a given node */
  int         degree;         /* The degree of a vertex */
  int        *support;        /* A list of elements in the support of a basis function */
  PetscTruth  rectangular;    /* Flag for a rectangular matrix */
  int         numGhostNodes;  /* The number of nodes constrained by variables in another domain */
  int         numGhostVars;   /* The number of new variables which lie in another domain */
  int        *ghostProcs;     /* The processor for each ghost node */
  int        *ghostNodes;     /* The global index for each ghost node */
  int        *ghostVarProcs;  /* The processor for each ghost variable */
  int        *ghostVars;      /* The global index for each ghost variable */
  int         newComp;        /* The number of components in the new field */
  int         numOverlapElements;
  PetscConstraintObject constCtx = grid->constraintCtx;
  FieldClassMap rowMap, colMap;
  int         numCorners;
  int         numNodes;
  int         marker;
  int         maxDegree;
  int        *rowClasses,    *colClasses;
  int        *rowClassSizes, *colClassSizes;
  int        *rowIsConst,    *colIsConst;
  int         rowLocVars  = tOrder->numLocVars;
  int         rowVars     = tOrder->numVars;
  int        *rowFirstVar = tOrder->firstVar;
  int        *rowOffsets  = tOrder->offsets;
  int         colLocVars  = sOrder->numLocVars;
  int         colVars     = sOrder->numVars;
  int        *colFirstVar = sOrder->firstVar;
  int        *colOffsets  = sOrder->offsets;
  int         rank, numProcs;
  int         proc, elem, sElem, corner, sCorner, neighbor, node, sNode, nclass, sNclass, var, count;
  PetscTruth  opt;
  int         ierr;

  PetscObjectGetComm((PetscObject) grid, &comm);
  MPI_Comm_size(comm, &numProcs);
  MPI_Comm_rank(comm, &rank);
  GridGetMesh(grid, &mesh);
  MeshGetPartition(mesh, &part);
  VarOrderingGetClassMap(tOrder, &rowMap);
  VarOrderingGetClassMap(sOrder, &colMap);
  MeshGetNumCorners(mesh, &numCorners);
  MeshGetMaxDegree(mesh, &maxDegree);
  PartitionGetNumOverlapElements(part, &numOverlapElements);
  numNodes       = rowMap->numNodes;
  rowClasses     = rowMap->classes;
  rowClassSizes  = rowMap->classSizes;
  rowIsConst     = rowMap->isClassConstrained;
  colClasses     = colMap->classes;
  colClassSizes  = colMap->classSizes;
  colIsConst     = colMap->isClassConstrained;
  newLocColStart = -1;
  newLocColEnd   = -1;
  /* Get partition information */
  locRowStart = rowFirstVar[rank];
  locRowEnd   = rowFirstVar[rank+1];
  locColStart = colFirstVar[rank];
  locColEnd   = colFirstVar[rank+1];
  rectangular = (sOrder->numVars != tOrder->numVars) ? PETSC_TRUE : PETSC_FALSE;
  /* Get new field information */
  if (constCtx != PETSC_NULL) {
    (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, &newComp);
  }

  /* Preallocate possible nonzeros - Note that we are being pessimistic since we set
     the whole dense element matrix, which we know contains some zeros for certain
     operators */
  PetscMalloc(numNodes * sizeof(int),             &nodeDone);
  PetscMalloc(maxDegree*numCorners * sizeof(int), &nodeNeighbors);

  /* Get the number of ghost variables due to constraints */
  numGhostNodes = 0;
  numGhostVars  = 0;
  if ((grid->isConstrained == PETSC_TRUE) && (numProcs > 1)) {
    PetscMemzero(nodeDone, numNodes * sizeof(int));
    for(elem = 0; elem < numOverlapElements; elem++) {
      for(corner = 0; corner < numCorners; corner++) {
        MeshGetNodeFromElement(mesh, elem, corner, &node);
        if (node >= numNodes) continue;
        if (nodeDone[node])   continue;
        nodeDone[node] = 1;

        nclass = rowClasses[node];
        if (rowIsConst[nclass]) {
          (*constCtx->ops->getindices)(constCtx, mesh, tOrder, node, CONSTRAINT_ROW_INDEX, &startVar);
          /* Include only new variables since only they can be ghosts */
          nodeVars = newComp;
          if ((startVar < locRowStart) || (startVar >= locRowEnd)) {
            /* This is a constraint which generates an off-processor variable */
            numGhostNodes++;
            numGhostVars += nodeVars;
          }
        }
      }
    }
  }

  /* Calculate matrix allocation */
  if (numGhostNodes > 0) {
    PetscMalloc(numGhostNodes * sizeof(int), &ghostNodes);
    PetscMalloc(numGhostNodes * sizeof(int), &ghostProcs);
    PetscMalloc(numGhostVars  * sizeof(int), &ghostVars);
    PetscMalloc(numGhostVars  * sizeof(int), &ghostVarProcs);
  }
  PetscMalloc((rowLocVars+numGhostVars) * sizeof(int), &diagRows);
  PetscMalloc((rowLocVars+numGhostVars) * sizeof(int), &offdiagRows);
  PetscMemzero(diagRows,    (rowLocVars+numGhostVars) * sizeof(int));
  PetscMemzero(offdiagRows, (rowLocVars+numGhostVars) * sizeof(int));
  PetscMemzero(nodeDone, numNodes * sizeof(int));
  for(elem = 0, numGhostNodes = 0, numGhostVars = 0; elem < numOverlapElements; elem++) {
    for(corner = 0; corner < numCorners; corner++) {
      MeshGetNodeFromElement(mesh, elem, corner, &node);
      if (node >= numNodes) continue;
      if (nodeDone[node])   continue;
      nodeDone[node] = 1;

      nclass      = rowClasses[node];
      startVar    = rowOffsets[node] - locRowStart;
      nodeVars    = rowClassSizes[nclass];
      newNodeVars = 0;
      if (rowIsConst[nclass]) {
        (*constCtx->ops->getindices)(constCtx, mesh, tOrder, node, CONSTRAINT_ROW_INDEX, &newStartVar);
        /* Include only new variables */
        newNodeVars = newComp;
        if ((newStartVar < locRowStart) || (newStartVar >= locRowEnd)) {
          /* This is a constraint which generates an off-processor variable */
          ghostNodes[numGhostNodes] = newStartVar;
          for(proc = 0; newStartVar >= rowFirstVar[proc+1]; proc++) ;
          ghostProcs[numGhostNodes] = proc;
          for(var = 0; var < newComp; var++, numGhostVars++) {
            ghostVars[numGhostVars]     = newStartVar + var;
            ghostVarProcs[numGhostVars] = proc;
          }
          numGhostNodes++;
          /* Set partition for the appropriate processor */
          newLocColStart = colFirstVar[proc];
          newLocColEnd   = colFirstVar[proc+1];
          /* Reset newStartVar to the correct position in diagRows */
          newStartVar = rowLocVars + (numGhostVars - newComp);
        } else {
          newLocColStart = locColStart;
          newLocColEnd   = locColEnd;
          /* Reset newStartVar to the correct position in diagRows */
          newStartVar -= locRowStart;
        }
      }
      if (nodeVars+newNodeVars == 0) continue;

      /* Loop over nodes on each element in the support of the node */
      MeshGetNodeSupport(mesh, node, elem, &degree, &support);
      for(sElem = 0, count = 0; sElem < degree; sElem++) {
        for(sCorner = 0; sCorner < numCorners; sCorner++) {
          /* Disregard normal columns if we are forming a boundary matrix */
          MeshGetNodeFromElement(mesh, support[sElem], sCorner, &sNode);
          MeshGetNodeBoundary(mesh, sNode, &marker);
          if ((bdCols == PETSC_TRUE) && (marker == 0)) continue;
          sNclass      = colClasses[sNode];
          sStartVar    = colOffsets[sNode];
          sNodeVars    = colClassSizes[sNclass];
          sNewNodeVars = 0;

          if (colIsConst[sNclass]) {
            (*constCtx->ops->getindices)(constCtx, mesh, sOrder, sNode, CONSTRAINT_COL_INDEX, &sNewStartVar);
            sNewNodeVars = newComp;
          }

          /* Check for duplicate node */
          for(neighbor = 0; neighbor < count; neighbor++) {
            if (nodeNeighbors[neighbor] == sNode) break;
          }
          if (neighbor < count) {
            continue;
          } else {
#ifdef PETSC_USE_BOPT_g
            if (count >= maxDegree*numCorners) {
              SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Too many neighboring nodes: %d", count);
            }
#endif
            nodeNeighbors[count++] = sNode;
          }

          PlaceVariables_Private(startVar,    nodeVars,    locColStart,    locColEnd,    sStartVar,    sNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(newStartVar, newNodeVars, newLocColStart, newLocColEnd, sStartVar,    sNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(startVar,    nodeVars,    locColStart,    locColEnd,    sNewStartVar, sNewNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(newStartVar, newNodeVars, newLocColStart, newLocColEnd, sNewStartVar, sNewNodeVars,
                                 rectangular, diagRows, offdiagRows);
#ifdef PETSC_USE_BOPT_g
          if ((numProcs == 1) && (offdiagRows[startVar] > 0)) {
            for(proc = 0; proc <= numProcs; proc++)
              PetscPrintf(PETSC_COMM_SELF, "colFirstVar[%d]: %d\n", proc, colFirstVar[proc]);
            for(node = 0; node < colMap->numNodes; node++)
              PetscPrintf(PETSC_COMM_SELF, "colOffsets[%d]: %d\n", node, colOffsets[node]);
            PetscPrintf(PETSC_COMM_SELF, "sNode %d sStartVar %d in [%d,%d)\n", sNode, sStartVar, locColStart, locColEnd);
            SETERRQ2(PETSC_ERR_PLIB, "Invalid var alloc in elem %d var %d", elem, startVar);
          }
          if ((numProcs == 1) && (rowIsConst[nclass]) && (offdiagRows[newStartVar] > 0)) {
            SETERRQ2(PETSC_ERR_PLIB, "Invalid var alloc in elem %d var %d", elem, newStartVar);
          }
#endif
        }
      }
      MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
    }
  }

#ifdef PETSC_USE_BOPT_g
  /* Check that we looked at every node */
  for(node = 0; node < numNodes; node++) {
    if (!nodeDone[node]) SETERRQ1(PETSC_ERR_PLIB, "Node %d was not encountered", node);
  }
#endif
  PetscOptionsHasName(PETSC_NULL, "-trace_alloc", &opt);
  if (opt == PETSC_TRUE) {
    for(var = 0; var < rowLocVars; var++) {
      PetscSynchronizedPrintf(comm, "diagRows[%d]: %d offdiagRows[%d]: %d\n",
                              var + rowFirstVar[rank], diagRows[var], var + rowFirstVar[rank], offdiagRows[var]);
    }
    PetscSynchronizedFlush(comm);
  }

  /* Communicate */
  if ((grid->isConstrained == PETSC_TRUE) && (numProcs > 1)) {
    PetscGhostExchange(comm, numGhostVars, ghostVarProcs, ghostVars, PETSC_INT, rowFirstVar,
                       ADD_VALUES, SCATTER_REVERSE, diagRows,    &diagRows[rowLocVars]);
    PetscGhostExchange(comm, numGhostVars, ghostVarProcs, ghostVars, PETSC_INT, rowFirstVar,
                       ADD_VALUES, SCATTER_REVERSE, offdiagRows, &offdiagRows[rowLocVars]);
  }

  /* Stopgap solution for constrained variables */
  if (grid->isConstrained == PETSC_TRUE) {
    for(var = 0; var < rowLocVars; var++) {
      if (diagRows[var]    > colLocVars) diagRows[var]    = colLocVars;
      if (offdiagRows[var] > colLocVars) offdiagRows[var] = colLocVars;
    }
  }

  /* Create the matrix */
  MatCreateMPIAIJ(comm, rowLocVars, colLocVars, rowVars, colVars, 0, diagRows, 0, offdiagRows, gmat);
  PetscObjectCompose((PetscObject) *gmat, "Grid", (PetscObject) grid);
  MatSetOption(*gmat, MAT_NEW_NONZERO_ALLOCATION_ERR);

  /* Cleanup */
  ierr = PetscFree(diagRows);
  ierr = PetscFree(offdiagRows);
  ierr = PetscFree(nodeDone);
  ierr = PetscFree(nodeNeighbors);
  if (numGhostNodes > 0) {
    PetscFree(ghostNodes);
    PetscFree(ghostProcs);
    PetscFree(ghostVars);
    PetscFree(ghostVarProcs);
  }

  return(0);
}
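
/* A minimal sketch (illustrative only; the GMAT2D_EXAMPLE guard and this helper
   are hypothetical, not part of the library) of the preallocation pattern
   implemented above: count the nonzeros each local row places in the diagonal
   and off-diagonal column blocks, then create the MPIAIJ matrix so that
   assembly triggers no further allocation. */
#ifdef GMAT2D_EXAMPLE
static int PreallocationSketch(MPI_Comm comm, int rowLocVars, int colLocVars, Mat *A)
{
  int *diagRows, *offdiagRows;
  int  row, ierr;

  ierr = PetscMalloc(rowLocVars * sizeof(int), &diagRows);
  ierr = PetscMalloc(rowLocVars * sizeof(int), &offdiagRows);
  for(row = 0; row < rowLocVars; row++) {
    diagRows[row]    = 3; /* assumed couplings inside the local column block */
    offdiagRows[row] = 2; /* assumed couplings to columns owned by other processors */
  }
  ierr = MatCreateMPIAIJ(comm, rowLocVars, colLocVars, PETSC_DECIDE, PETSC_DECIDE,
                         0, diagRows, 0, offdiagRows, A);
  ierr = PetscFree(diagRows);
  ierr = PetscFree(offdiagRows);
  return(ierr);
}
#endif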

int GMatView_Draw_Triangular_2D(GMat gmat, PetscViewer v)
{
  int ierr;

  ierr = MatView(gmat, v);
  PetscFunctionReturn(ierr);
}

int GMatView_Triangular_2D(GMat gmat, PetscViewer viewer)
{
  Grid       grid;
  PetscTruth isascii, isdraw;
  int        ierr;

  PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
  PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
  if (isascii == PETSC_TRUE) {
    GMatGetGrid(gmat, &grid);
    GridView(grid, viewer);
    PetscViewerFlush(viewer);
    MatView(gmat, viewer);
  } else if (isdraw == PETSC_TRUE) {
    GMatView_Draw_Triangular_2D(gmat, viewer);
  }

  return(0);
}

int GMatEvaluateALEOperatorGalerkin_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                                  LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                                  LocalVarOrdering tLocOrder, int op, PetscScalar alpha, MatAssemblyType type,
                                                  void *ctx)
{
  Mesh         mesh       = grid->mesh;
  Partition    part;
  int          numElements;
  int          sElemSize  = sLocOrder->elemSize;
  int          tElemSize  = tLocOrder->elemSize;
  int         *sElemStart = sLocOrder->elemStart;
  int         *tElemStart = tLocOrder->elemStart;
  ElementVec   ghostVec   = grid->ghostElementVec; /* Local solution vector */
  PetscScalar *ghostArray = ghostVec->array;       /* The values in the ghost element vector */
  MeshMover    mover;
  Grid         ALEGrid;    /* The grid describing the mesh velocity */
  ElementMat   mat;        /* The element matrix */
  PetscScalar *array;      /* The values in the element matrix */
  ElementVec   MeshALEVec; /* The ALE velocity vector with mesh discretization */
  ElementVec   ALEVec;     /* The ALE velocity vector */
  PetscScalar *ALEArray;   /* The values in the ALE element vector */
  int          sField, tField;
  int          f, elem;
#ifdef PETSC_USE_BOPT_g
  int          i, j;
  PetscTruth   opt;
#endif
  int          ierr;

  MeshGetPartition(mesh, &part);
  MeshGetMover(mesh, &mover);
  PartitionGetNumElements(part, &numElements);
  MeshMoverGetVelocityGrid(mover, &ALEGrid);
  /* Setup element matrix */
  ierr  = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array = mat->array;

  /* Setup ALE variables */
  if (grid->ALEActive == PETSC_TRUE) {
    /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
    MeshALEVec = ALEGrid->vec;
    ALEVec     = grid->vec;
    ALEArray   = ALEVec->array;
  } else {
    MeshALEVec = PETSC_NULL;
    ALEVec     = PETSC_NULL;
    ALEArray   = PETSC_NULL;
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);

    /* Setup global row and column indices */
    GridCalcLocalElementVecIndices(grid, elem, ghostVec);

    /* Setup local solution vector */
    GridLocalToElement(grid, ghostVec);

    /* Setup ALE variables */
    if (grid->ALEActive == PETSC_TRUE) {
      GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
      GridLocalToElement(ALEGrid, MeshALEVec);
    }

    for(f = 0; f < numFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
      DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                                op, alpha, elem, &ghostArray[sElemStart[sField]], ALEArray, array, ctx);
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, " %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, " %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }
  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  /* Reset size functions */
  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

int GMatEvaluateOperatorGalerkin_Triangular_2D(Grid grid, GMat M, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
                                               VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                               MatAssemblyType type, void *ctx)
{
  Mesh          mesh          = grid->mesh;
  PetscTruth    reduceSystem  = grid->reduceSystem;
  PetscTruth    reduceElement = grid->reduceElement;
  int           sElemSize     = sLocOrder->elemSize;
  int           tElemSize     = tLocOrder->elemSize;
  int          *sElemStart    = sLocOrder->elemStart;
  int          *tElemStart    = tLocOrder->elemStart;
  FieldClassMap sMap, tMap;
  int           numSFields, numTFields;
  int          *sFields, *tFields;
  PetscTruth    sConstrained, tConstrained;
  Vec           ghostVec;     /* The local ghost vector for x (usually the solution) */
  VecScatter    ghostScatter; /* The scatter from x to ghostVec */
  ElementMat    mat;
  ElementVec    elemGhostVec;
  PetscScalar  *ghostArray, *array;
  int           numElements;
  int           sField, tField;
  int           f, elem;
#ifdef PETSC_USE_BOPT_g
  PetscTruth    opt;
#endif
  int           ierr;

  MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
  VarOrderingGetClassMap(sOrder, &sMap);
  VarOrderingGetClassMap(tOrder, &tMap);
  numSFields   = sMap->numFields;
  sFields      = sMap->fields;
  sConstrained = sMap->isConstrained;
  numTFields   = tMap->numFields;
  tFields      = tMap->fields;
  tConstrained = tMap->isConstrained;
  /* Setup reduction */
  (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
  /* Setup element vector and matrix */
  if (tConstrained == PETSC_TRUE) {
    for(f = 0; f < numTFields; f++) {
      if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
        tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
    }
  }
  if (sConstrained == PETSC_TRUE) {
    for(f = 0; f < numSFields; f++) {
      if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
        sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    }
  }
  ierr       = ElementVecCreate(grid->comm, tElemSize, &elemGhostVec);
  ghostArray = elemGhostVec->array;
  ierr       = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array      = mat->array;
  ierr       = ElementVecZero(elemGhostVec);

  /* Fill the local solution vectors */
  if (x != PETSC_NULL) {
    GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numSFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);
    mat->reduceRowSize       = tLocOrder->elemSize;
    mat->reduceColSize       = sLocOrder->elemSize;
    elemGhostVec->reduceSize = tLocOrder->elemSize;

    if (x != PETSC_NULL) {
      /* Setup local row indices for the ghost vector */
      GridCalcGeneralElementVecIndices(grid, elem, tOrder, PETSC_NULL, PETSC_TRUE, elemGhostVec);
      /* Setup local solution vector */
      GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
      /* Must transform to unconstrained variables for element integrals */
      GridProjectElementVec(grid, mesh, elem, tOrder, PETSC_FALSE, elemGhostVec);
    }
    for(f = 0; f < numSFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                             op, alpha, elem, &ghostArray[sElemStart[sField]], array, ctx);
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  VecDestroy(ghostVec);
  VecScatterDestroy(ghostScatter);
  ElementVecDestroy(elemGhostVec);
  ElementMatDestroy(mat);

  return(0);
}
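
/* A minimal sketch (hypothetical helper under the hypothetical GMAT2D_EXAMPLE
   guard; the operator evaluation is elided) of the element-assembly pattern
   shared by the Galerkin evaluators in this file: zero the element matrix,
   fill it for one element, map its rows and columns to global indices, and
   accumulate it into the global matrix. */
#ifdef GMAT2D_EXAMPLE
static int AssemblySketch(Grid grid, GMat M, VarOrdering sOrder, VarOrdering tOrder,
                          int tElemSize, int sElemSize, int numElements)
{
  ElementMat mat;
  int        elem, ierr;

  ierr = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  for(elem = 0; elem < numElements; elem++) {
    ierr = ElementMatZero(mat);
    /* ... a DiscretizationEvaluate...Galerkin() call would fill mat->array here ... */
    ierr = GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
    ierr = ElementMatSetValues(mat, M, ADD_VALUES);
  }
  ierr = MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY);
  ierr = MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY);
  ierr = ElementMatDestroy(mat);
  return(ierr);
}
#endif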

int GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                                             LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                                             LocalVarOrdering tLocOrder, int op, PetscScalar alpha, MatAssemblyType type,
                                                             void *ctx)
{
  Mesh         mesh       = grid->mesh;
  Partition    part;
  int          numElements;
  int          sElemSize  = sLocOrder->elemSize;
  int          tElemSize  = tLocOrder->elemSize;
  int         *sElemStart = sLocOrder->elemStart;
  int         *tElemStart = tLocOrder->elemStart;
  ElementVec   ghostVec   = grid->ghostElementVec; /* Local solution vector */
  PetscScalar *ghostArray = ghostVec->array;       /* The values in the ghost element vector */
  MeshMover    mover;
  Grid         ALEGrid;    /* The grid describing the mesh velocity */
  ElementMat   mat;        /* The element matrix */
  PetscScalar *array;      /* The values in the element matrix */
  ElementVec   MeshALEVec; /* The ALE velocity vector with mesh discretization */
  ElementVec   ALEVec;     /* The ALE velocity vector */
  PetscScalar *ALEArray;   /* The values in the ALE element vector */
  int          sField, tField;
  int          f, elem;
#ifdef PETSC_USE_BOPT_g
  PetscTruth   opt;
  int          i, j;
#endif
  int          ierr;

  MeshGetPartition(mesh, &part);
  MeshGetMover(mesh, &mover);
  PartitionGetNumElements(part, &numElements);
  MeshMoverGetVelocityGrid(mover, &ALEGrid);
  /* Setup element matrix */
  for(f = 0; f < numFields; f++) {
    if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
      sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
      tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
  }
  ierr  = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array = mat->array;

  /* Setup ALE variables -- No new variables should be ALE so ALEVec is not recalculated */
  if (grid->ALEActive == PETSC_TRUE) {
    /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
    MeshALEVec = ALEGrid->vec;
    ALEVec     = grid->vec;
    ALEArray   = ALEVec->array;
  } else {
    MeshALEVec = PETSC_NULL;
    ALEVec     = PETSC_NULL;
    ALEArray   = PETSC_NULL;
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);
    mat->reduceRowSize = tLocOrder->elemSize;
    mat->reduceColSize = sLocOrder->elemSize;

    /* Setup global row and column indices */
    GridCalcLocalElementVecIndices(grid, elem, ghostVec);

    /* Setup local solution vector */
    GridLocalToElement(grid, ghostVec);

    /* Setup ALE variables */
    if (grid->ALEActive == PETSC_TRUE) {
      GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
      GridLocalToElement(ALEGrid, MeshALEVec);
    }

    for(f = 0; f < numFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
      DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                                op, alpha, elem, &ghostArray[sElemStart[sField]], ALEArray, array, ctx);
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, " %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, " %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  /* Reset size functions */
  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

int GMatEvaluateNewFields_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                        LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                        LocalVarOrdering tLocOrder, PetscScalar alpha, MatAssemblyType type, void *ctx)
{
  VarOrdering           constOrder = grid->constraintOrder; /* The constrained variable ordering */
  PetscConstraintObject constCtx   = grid->constraintCtx;   /* The constraint object */
  int                   sElemSize  = 0;
  int                   tElemSize  = 0;
  ElementMat            mat;                                /* The element matrix */
  int                   f, newField;
#ifdef PETSC_USE_BOPT_g
  int                   i, j;
  PetscTruth            opt;
#endif
  int                   ierr;

  /* Setup element matrix */
  for(f = 0; f < numFields; f++) {
    if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
      sElemSize += grid->fields[sFields[f]].disc->comp + grid->fields[sFields[f]].constraintCompDiff;
    if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
      tElemSize += grid->fields[tFields[f]].disc->comp + grid->fields[tFields[f]].constraintCompDiff;
  }
  ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);

  for(newField = 0; newField < grid->numNewFields; newField++) {
    /* Initialize element matrix */
    ElementMatZero(mat);

    /* Calculate the indices and contribution to the element matrix from the new field */
    (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, " %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, " %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

int GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D(Grid grid, GMat M, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
                                                       VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                                       MatAssemblyType type, void *ctx)
{
  MPI_Comm         comm;
  Mesh             mesh          = grid->mesh;
  Partition        part;
  Mesh_Triangular *tri           = (Mesh_Triangular *) mesh->data;
  PetscTruth       reduceSystem  = grid->reduceSystem;
  PetscTruth       reduceElement = grid->reduceElement;
  int              sElemSize     = sLocOrder->elemSize;
  int              tElemSize     = tLocOrder->elemSize;
  int             *sElemStart    = sLocOrder->elemStart;
  int             *tElemStart    = tLocOrder->elemStart;
  int              numEdges;
  int             *bdEdges       = tri->bdEdges;
  int              rank;
  FieldClassMap    sMap, tMap;
  int              firstEdge;
  int              numSFields, numTFields;
  int             *sFields, *tFields;
  PetscTruth       sConstrained, tConstrained;
  Vec              ghostVec;     /* The local ghost vector for x (usually the solution) */
  VecScatter       ghostScatter; /* The scatter from x to ghostVec */
  ElementMat       mat;
  ElementVec       elemGhostVec;
  PetscScalar     *array;
  EdgeContext      bdCtx;        /* A context wrapper to communicate the midnode of an edge */
  int              sField, tField;
  int              f, bd, edge, bdEdge, midNode;
#ifdef PETSC_USE_BOPT_g
  PetscTruth       opt;
#endif
  int              ierr;

  PetscObjectGetComm((PetscObject) grid, &comm);
  MPI_Comm_rank(comm, &rank);
  VarOrderingGetClassMap(sOrder, &sMap);
  VarOrderingGetClassMap(tOrder, &tMap);
  MeshGetPartition(mesh, &part);
  PartitionGetNumEdges(part, &numEdges);
  PartitionGetStartEdge(part, &firstEdge);
  numSFields   = sMap->numFields;
  sFields      = sMap->fields;
  sConstrained = sMap->isConstrained;
  numTFields   = tMap->numFields;
  tFields      = tMap->fields;
  tConstrained = tMap->isConstrained;
  /* Setup reduction */
  (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
  /* Setup element vector and matrix */
  if (tConstrained == PETSC_TRUE) {
    for(f = 0; f < numTFields; f++) {
      if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
        tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
    }
  }
  if (sConstrained == PETSC_TRUE) {
    for(f = 0; f < numSFields; f++) {
      if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
        sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    }
  }
  ElementVecCreate(comm, tElemSize, &elemGhostVec);
  ElementMatCreate(comm, tElemSize, sElemSize, &mat);
  ElementVecZero(elemGhostVec);
  array = mat->array;

  /* Setup user context */
  bdCtx.ctx = ctx;

  /* Fill the local solution vectors */
  if (x != PETSC_NULL) {
    GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numSFields; f++) {
    grid->fields[sFields[f]].disc->bdDisc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  /* Our problem here is that "edges" are not data structures like "elements". The element
     holds the midnodes which appear on it, but edges do not. Thus we must pass the midnode
     number to the discretization, which we do using a context wrapper (see the sketch after
     this function). Unfortunately, the row indices were derived from elements, so we must
     introduce another numbering function which operates on nodes alone. The midnode number
     is found by a search of the elements, which could certainly be improved with geometric
     hints. We might also assume that it is the node lying between the two endpoints in the
     bdNodes[] array. In addition, the boundary variable ordering is relative to boundary
     node numbers, so the node number must be converted before calling the numbering
     function. This could be sped up by placing boundary node numbers in the bdEdges[]
     array instead. */

  /* Loop over boundary edges */
  for(bd = 0, bdEdge = 0; bd < grid->numBd; bd++) {
    for(bdEdge = tri->bdEdgeBegin[bd]; bdEdge < tri->bdEdgeBegin[bd+1]; bdEdge++) {
      /* Check that the edge is on this processor */
      edge = bdEdges[bdEdge] - firstEdge;
      if ((edge < 0) || (edge >= numEdges)) continue;

      MeshGetMidnodeFromEdge(mesh, edge, &midNode);
      bdCtx.midnode = midNode;

      /* Initialize element matrix */
      ElementMatZero(mat);
      mat->reduceRowSize       = tLocOrder->elemSize;
      mat->reduceColSize       = sLocOrder->elemSize;
      elemGhostVec->reduceSize = tLocOrder->elemSize;

      if (x != PETSC_NULL) {
        /* Setup local row indices for the ghost vector */
        GridCalcBoundaryElementVecIndices(grid, bd, edge, midNode, tOrder, PETSC_TRUE, elemGhostVec);
        /* Setup local solution vector */
        GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
        /* Must transform to unconstrained variables for element integrals */
        GridProjectElementVec(grid, mesh, edge, tOrder, PETSC_FALSE, elemGhostVec);
        SETERRQ(PETSC_ERR_SUP, "Being reworked");
      }
      for(f = 0; f < numSFields; f++) {
        sField = sFields[f];
        tField = tFields[f];
        /* Calculate the contribution to the element matrix from the field */
        DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc->bdDisc, mesh, sElemSize, tElemStart[tField],
                                               sElemStart[sField], op, alpha, edge, PETSC_NULL, array, &bdCtx);
      }

      /* Setup global row and column indices */
      GridCalcBoundaryElementMatIndices(grid, bd, edge, midNode, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
      PetscOptionsHasName(PETSC_NULL, "-trace_mat_bd_assembly", &opt);
      if (opt == PETSC_TRUE) {
        ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
      }
#endif
      /* Put values in the global matrix */
      ElementMatSetValues(mat, M, ADD_VALUES);
    }
  }
#ifdef PETSC_USE_BOPT_g
  if (bdEdge != mesh->numBdEdges) SETERRQ(PETSC_ERR_PLIB, "Invalid boundary edge numbering");
#endif

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  VecDestroy(ghostVec);
  VecScatterDestroy(ghostScatter);
  ElementVecDestroy(elemGhostVec);
  ElementMatDestroy(mat);

  return(0);
}
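
/* Sketch of the EdgeContext wrapper described in the comment above the boundary
   loop (hypothetical helper under the hypothetical GMAT2D_EXAMPLE guard; only the
   wrapper fields already used in this file are assumed): the midnode number rides
   along with the user context into the boundary discretization. */
#ifdef GMAT2D_EXAMPLE
static int BoundaryContextSketch(Mesh mesh, int edge, void *userCtx)
{
  EdgeContext bdCtx;
  int         midNode, ierr;

  bdCtx.ctx = userCtx; /* wrap the user's context */
  ierr = MeshGetMidnodeFromEdge(mesh, edge, &midNode);
  bdCtx.midnode = midNode; /* pass the midnode alongside it */
  /* &bdCtx then serves as the ctx argument to DiscretizationEvaluateOperatorGalerkin()
     for the boundary discretization, as in the function above */
  return(ierr);
}
#endif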