Actual source code: pipeline.c
/*$Id: pipeline.c,v 1.29 2001/09/07 20:08:55 bsmith Exp $*/

/*
       Vector pipeline routines. These routines have all been contributed
    by Victor Eijkhout while working at UCLA and UTK.

    Note: these are not completely PETScified. For example the PETSCHEADER in the
    pipeline object is not completely filled in so that the pipeline object cannot
    be used as a TRUE PETSc object. Also there are some memory leaks. This code
    attempts to reuse some of the VecScatter internal code and thus is kind of tricky.
*/

#include "src/vec/vecimpl.h"
#include "petscsys.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"

typedef int (*PipelineFunction)(int,PetscObject);

struct _p_VecPipeline {
  PETSCHEADER(int)
  VecScatter             scatter;
  PipelineType           pipe_type;   /* duplicated in the subdomain structure */
  VecScatter_MPI_General *upto,*upfrom,*dnto,*dnfrom;
  VecScatter_MPI_General *scatterto,*scatterfrom;
  PipelineFunction       upfn,dnfn;
  PetscObject            aux_data;
  PetscObject            custom_pipe_data;
  int                    setupcalled;
  int                    (*setup)(VecPipeline,PetscObject,PetscObject*);
};
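
/*
   The pipeline object keeps the full VecScatter plus filtered copies of its
   send/receive descriptors: upto/upfrom describe the sends and receives used
   in a PIPELINE_UP sweep, dnto/dnfrom those used in a PIPELINE_DOWN sweep.
   scatterto/scatterfrom remember the scatter's original descriptors so that
   they can still be freed in VecPipelineDestroy() after the scatter's
   todata/fromdata pointers have been switched to the up/down copies.
*/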

static int VecPipelineCreateUpDown(VecScatter scatter,VecScatter_MPI_General **to,VecScatter_MPI_General **from)
{
  VecScatter_MPI_General *gen_to,*gen_from,*pipe_to,*pipe_from;
  int                    ierr;

  gen_to   = (VecScatter_MPI_General*)scatter->todata;
  gen_from = (VecScatter_MPI_General*)scatter->fromdata;

  ierr = PetscNew(VecScatter_MPI_General,&pipe_to);CHKERRQ(ierr);
  ierr = PetscMemzero(pipe_to,sizeof(VecScatter_MPI_General));CHKERRQ(ierr);
  ierr = PetscNew(VecScatter_MPI_General,&pipe_from);CHKERRQ(ierr);
  ierr = PetscMemzero(pipe_from,sizeof(VecScatter_MPI_General));CHKERRQ(ierr);

  pipe_to->requests   = gen_to->requests;
  pipe_from->requests = gen_from->requests;
  pipe_to->local      = gen_to->local;
  pipe_from->local    = gen_from->local;
  pipe_to->values     = gen_to->values;
  pipe_from->values   = gen_from->values;
  ierr = PetscMalloc((gen_to->n+1)*sizeof(MPI_Status),&pipe_to->sstatus);CHKERRQ(ierr);
  ierr = PetscMalloc((gen_from->n+1)*sizeof(MPI_Status),&pipe_from->sstatus);CHKERRQ(ierr);

  *to   = pipe_to;
  *from = pipe_from;

  return(0);
}
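
/*
   The up/down structures created above share the requests, local scatter, and
   value buffers of the original scatter; only the procs, starts, and indices
   arrays are allocated per direction (in VecPipelineSetupSelect), which is
   why VecPipelineDestroy() frees just those three arrays for them.
*/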

/* --------------------------------------------------------------*/
/*C
   VecPipelineCreate - Creates a vector pipeline context.
*/
int VecPipelineCreate(MPI_Comm comm,Vec xin,IS ix,Vec yin,IS iy,VecPipeline *newctx)
{
  VecPipeline ctx;
  int         ierr;

  ierr = PetscNew(struct _p_VecPipeline,&ctx);CHKERRQ(ierr);
  ierr = PetscMemzero(ctx,sizeof(struct _p_VecPipeline));CHKERRQ(ierr);
  ctx->comm = comm;
  ierr = VecScatterCreate(xin,ix,yin,iy,&(ctx->scatter));CHKERRQ(ierr);
  ierr = VecPipelineSetType(ctx,PIPELINE_SEQUENTIAL,PETSC_NULL);CHKERRQ(ierr);
  ctx->setupcalled = 0;
  ctx->upfn        = 0;
  ctx->dnfn        = 0;

  /*
     Keep a copy of the original scatter fields so we can destroy them later
     when the pipeline is destroyed
  */
  ctx->scatterto   = (VecScatter_MPI_General *)ctx->scatter->todata;
  ctx->scatterfrom = (VecScatter_MPI_General *)ctx->scatter->fromdata;

  ierr = VecPipelineCreateUpDown(ctx->scatter,&(ctx->upto),&(ctx->upfrom));CHKERRQ(ierr);
  ierr = VecPipelineCreateUpDown(ctx->scatter,&(ctx->dnto),&(ctx->dnfrom));CHKERRQ(ierr);

  *newctx = ctx;

  return(0);
}
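
/*
   A minimal sketch of the intended calling sequence (the communicator,
   vectors, and index sets are hypothetical, and error checking is omitted):

      VecPipeline pipe;
      VecPipelineCreate(PETSC_COMM_WORLD,x,ix,y,iy,&pipe);
      VecPipelineSetType(pipe,PIPELINE_SEQUENTIAL,PETSC_NULL);
      ...  VecPipelineBegin()/VecPipelineEnd() sweeps  ...
      VecPipelineDestroy(pipe);
*/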

static int VecPipelineSetupSelect(VecScatter_MPI_General *gen,VecScatter_MPI_General *pipe,
                                  int (*test)(int,PetscObject),PetscObject pipe_data)
{
  int ierr,i;

  /* count the sends/receives that pass the selection test */
  pipe->n = 0;
  for (i=0; i<gen->n; i++) {
    if ((*test)(gen->procs[i],pipe_data)) {
      pipe->n++;
    }
  }

  ierr = PetscMalloc((pipe->n+1)*sizeof(int),&pipe->procs);CHKERRQ(ierr);
  ierr = PetscMalloc((pipe->n+1)*sizeof(int),&pipe->starts);CHKERRQ(ierr);
  {
    int pipe_size = 1;
    if (gen->n) pipe_size = gen->starts[gen->n]+1;
    ierr = PetscMalloc(pipe_size*sizeof(int),&pipe->indices);CHKERRQ(ierr);
  }
  {
    int *starts  = gen->starts,*pstarts = pipe->starts;
    int *procs   = gen->procs,*pprocs = pipe->procs;
    int *indices = gen->indices,*pindices = pipe->indices;
    int n = 0;

    /* copy the procs, starts, and indices of the selected messages */
    pstarts[0] = 0;
    for (i=0; i<gen->n; i++) {
      if ((*test)(gen->procs[i],pipe_data)) {
        int j;
        pprocs[n]    = procs[i];
        pstarts[n+1] = pstarts[n]+ starts[i+1]-starts[i];
        for (j=0; j<pstarts[n+1]-pstarts[n]; j++) {
          pindices[pstarts[n]+j] = indices[starts[i]+j];
        }
        n++;
      }
    }
  }

  return(0);
}

/*C
   VecPipelineSetup - Sets up a vector pipeline context.
   This call is done implicitly in VecPipelineBegin, but
   since it is a bit costly, you may want to do it explicitly
   when timing.
*/
int VecPipelineSetup(VecPipeline ctx)
{
  VecScatter_MPI_General *gen_to,*gen_from;
  int                    ierr;

  if (ctx->setupcalled) return(0);

  ierr = (ctx->setup)(ctx,ctx->aux_data,&(ctx->custom_pipe_data));CHKERRQ(ierr);

  gen_to   = (VecScatter_MPI_General*)ctx->scatter->todata;
  gen_from = (VecScatter_MPI_General*)ctx->scatter->fromdata;

  /* data for PIPELINE_UP */
  ierr = VecPipelineSetupSelect(gen_to,ctx->upto,ctx->upfn,ctx->custom_pipe_data);CHKERRQ(ierr);
  ierr = VecPipelineSetupSelect(gen_from,ctx->upfrom,ctx->dnfn,ctx->custom_pipe_data);CHKERRQ(ierr);

  /* data for PIPELINE_DOWN */
  ierr = VecPipelineSetupSelect(gen_to,ctx->dnto,ctx->dnfn,ctx->custom_pipe_data);CHKERRQ(ierr);
  ierr = VecPipelineSetupSelect(gen_from,ctx->dnfrom,ctx->upfn,ctx->custom_pipe_data);CHKERRQ(ierr);

  ctx->setupcalled = 1;

  return(0);
}
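
/*
   For example, when timing a sweep one would call the setup once outside the
   timed region (a sketch; pipe, x, y, t0, and t1 are hypothetical names):

      VecPipelineSetup(pipe);
      PetscGetTime(&t0);
      VecPipelineBegin(x,y,INSERT_VALUES,SCATTER_FORWARD,PIPELINE_UP,pipe);
      VecPipelineEnd(x,y,INSERT_VALUES,SCATTER_FORWARD,PIPELINE_UP,pipe);
      PetscGetTime(&t1);
*/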

/*
   VecPipelineSetType
*/
EXTERN int ProcYes(int proc,PetscObject pipe_info);
EXTERN int ProcUp(int proc,PetscObject pipe_info);
EXTERN int ProcDown(int proc,PetscObject pipe_info);
EXTERN int ProcColorUp(int proc,PetscObject pipe_info);
EXTERN int ProcColorDown(int proc,PetscObject pipe_info);
EXTERN int PipelineSequentialSetup(VecPipeline,PetscObject,PetscObject*);
EXTERN int PipelineRedblackSetup(VecPipeline,PetscObject,PetscObject*);
EXTERN int PipelineMulticolorSetup(VecPipeline,PetscObject,PetscObject*);

int ProcNo(int proc,PetscObject pipe_info);

/*C
   VecPipelineSetType - Sets the type of a vector pipeline. Vector
   pipelines are to be used as

      VecPipelineBegin(<see below for parameters>)
      <do useful work with incoming data>
      VecPipelineEnd(<see below for parameters>)

   Input Parameters:
+  ctx - vector pipeline object
+  type - vector pipeline type
   Choices currently allowed are
   -- PIPELINE_NONE all processors send and receive simultaneously
   -- PIPELINE_SEQUENTIAL processors send and receive in ascending
      order of MPI rank
   -- PIPELINE_REDBLACK even numbered processors only send; odd numbered
      processors only receive
   -- PIPELINE_MULTICOLOR processors are given a color and send/receive
      according to ascending color
+  x - auxiliary data; for PIPELINE_MULTICOLOR this should be
   <(PetscObject)pmat> where pmat is the matrix on which the coloring
   is to be based.

.seealso: VecPipelineCreate(), VecPipelineBegin(), VecPipelineEnd().
*/
int VecPipelineSetType(VecPipeline ctx,PipelineType type,PetscObject x)
{
  ctx->pipe_type = type;
  ctx->aux_data  = x;
  if (type==PIPELINE_NONE) {
    ctx->upfn  = &ProcYes;
    ctx->dnfn  = &ProcYes;
    ctx->setup = &PipelineSequentialSetup;
  } else if (type==PIPELINE_SEQUENTIAL) {
    ctx->upfn  = &ProcUp;
    ctx->dnfn  = &ProcDown;
    ctx->setup = &PipelineSequentialSetup;
  } else if (type == PIPELINE_REDBLACK) {
    ctx->upfn  = &ProcColorUp;
    ctx->dnfn  = &ProcColorDown;
    ctx->setup = &PipelineRedblackSetup;
  } else if (type == PIPELINE_MULTICOLOR) {
    ctx->upfn  = &ProcColorUp;
    ctx->dnfn  = &ProcColorDown;
    ctx->setup = &PipelineMulticolorSetup;
  } else {
    SETERRQ1(1,"VecPipelineSetType: unknown or not implemented type %d",(int)type);
  }

  return(0);
}
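
/*
   For instance, to order the processors by a coloring derived from the
   communication structure of a parallel AIJ matrix pmat (a hypothetical
   matrix handle):

      VecPipelineSetType(pipe,PIPELINE_MULTICOLOR,(PetscObject)pmat);

   PIPELINE_SEQUENTIAL and PIPELINE_REDBLACK ignore the auxiliary argument,
   so PETSC_NULL can be passed, as VecPipelineCreate() does for its default.
*/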

/*
   VecPipelineBegin - Receives data from processors earlier in
   a processor pipeline from one vector to another.

.seealso: VecPipelineEnd.
*/
int VecPipelineBegin(Vec x,Vec y,InsertMode addv,ScatterMode smode,PipelineDirection pmode,VecPipeline ctx)
{
  if (!ctx->setupcalled) {
    VecPipelineSetup(ctx);
  }

  if (pmode==PIPELINE_UP) {
    ctx->scatter->todata   = ctx->upto;
    ctx->scatter->fromdata = ctx->upfrom;
  } else if (pmode==PIPELINE_DOWN) {
    ctx->scatter->todata   = ctx->dnto;
    ctx->scatter->fromdata = ctx->dnfrom;
  } else SETERRQ1(1,"VecPipelineBegin: unknown or not implemented pipeline mode %d",pmode);

  {
    VecScatter             scat = ctx->scatter;
    VecScatter_MPI_General *gen_to;
    int                    nsends=0;

    if (smode & SCATTER_REVERSE){
      gen_to = (VecScatter_MPI_General*)scat->fromdata;
    } else {
      gen_to = (VecScatter_MPI_General*)scat->todata;
    }
    /* except for PIPELINE_NONE, suppress the sends so that only the
       receives from earlier processors are performed here */
    if (ctx->pipe_type != PIPELINE_NONE) {
      nsends    = gen_to->n;
      gen_to->n = 0;
    }
    VecScatterBegin(x,y,addv,smode,scat);
    VecScatterEnd(x,y,addv,smode,scat);
    if (ctx->pipe_type != PIPELINE_NONE) gen_to->n = nsends;
  }
  return(0);
}

/*
   VecPipelineEnd - Sends data to processors later in
   a processor pipeline from one vector to another.

.seealso: VecPipelineBegin.
*/
int VecPipelineEnd(Vec x,Vec y,InsertMode addv,ScatterMode smode,PipelineDirection pmode,VecPipeline ctx)
{
  VecScatter             scat = ctx->scatter;
  VecScatter_MPI_General *gen_from,*gen_to;
  int                    nsends=0,nrecvs,ierr;

  if (smode & SCATTER_REVERSE){
    gen_to   = (VecScatter_MPI_General*)scat->fromdata;
    gen_from = (VecScatter_MPI_General*)scat->todata;
  } else {
    gen_to   = (VecScatter_MPI_General*)scat->todata;
    gen_from = (VecScatter_MPI_General*)scat->fromdata;
  }
  /* the receives were already done in VecPipelineBegin, so suppress them here;
     for PIPELINE_NONE the sends were done there as well */
  if (ctx->pipe_type == PIPELINE_NONE) {
    nsends    = gen_to->n;
    gen_to->n = 0;
  }
  nrecvs      = gen_from->n;
  gen_from->n = 0;
  ierr = VecScatterBegin(x,y,addv,smode,scat);CHKERRQ(ierr);
  ierr = VecScatterEnd(x,y,addv,smode,scat);CHKERRQ(ierr);
  if (ctx->pipe_type == PIPELINE_NONE) gen_to->n = nsends;
  gen_from->n = nrecvs;

  return(0);
}
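
/*
   Begin/End are used as a pair around the local work: Begin receives the data
   from the processors earlier in the pipeline, End sends to the later ones.
   A sketch of an upward followed by a downward sweep (pipe, x, and y are
   hypothetical):

      VecPipelineBegin(x,y,INSERT_VALUES,SCATTER_FORWARD,PIPELINE_UP,pipe);
      ...  local work using the data received in y  ...
      VecPipelineEnd(x,y,INSERT_VALUES,SCATTER_FORWARD,PIPELINE_UP,pipe);

      VecPipelineBegin(x,y,INSERT_VALUES,SCATTER_FORWARD,PIPELINE_DOWN,pipe);
      ...  local work  ...
      VecPipelineEnd(x,y,INSERT_VALUES,SCATTER_FORWARD,PIPELINE_DOWN,pipe);
*/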

/*
   This destroys the material that is allocated inside the
   VecScatter_MPI_General data structure. Mimics the
   VecScatterDestroy_PtoP() routine except that it does not destroy
   the wrapper VecScatter object.
*/

static int VecPipelineDestroy_MPI_General(VecScatter_MPI_General *gen)
{
  if (gen->sstatus) {PetscFree(gen->sstatus);}
  PetscFree(gen);
  return(0);
}

/*C
   VecPipelineDestroy - Destroys a pipeline context created by
   VecPipelineCreate().
*/
int VecPipelineDestroy(VecPipeline ctx)
{
  /* free the VecScatter_MPI_General data structures */
  if (ctx->upto) {
    PetscFree(ctx->upto->procs);
    PetscFree(ctx->upto->starts);
    PetscFree(ctx->upto->indices);
    VecPipelineDestroy_MPI_General(ctx->upto);
  }
  if (ctx->upfrom) {
    PetscFree(ctx->upfrom->procs);
    PetscFree(ctx->upfrom->starts);
    PetscFree(ctx->upfrom->indices);
    VecPipelineDestroy_MPI_General(ctx->upfrom);
  }
  if (ctx->dnto) {
    PetscFree(ctx->dnto->procs);
    PetscFree(ctx->dnto->starts);
    PetscFree(ctx->dnto->indices);
    VecPipelineDestroy_MPI_General(ctx->dnto);
  }
  if (ctx->dnfrom) {
    PetscFree(ctx->dnfrom->procs);
    PetscFree(ctx->dnfrom->starts);
    PetscFree(ctx->dnfrom->indices);
    VecPipelineDestroy_MPI_General(ctx->dnfrom);
  }
  if (ctx->scatterto) {
    PetscFree(ctx->scatterto->values);
    if (ctx->scatterto->local.slots) {PetscFree(ctx->scatterto->local.slots);}
    VecPipelineDestroy_MPI_General(ctx->scatterto);
  }
  if (ctx->scatterfrom) {
    PetscFree(ctx->scatterfrom->values);
    if (ctx->scatterfrom->local.slots) {PetscFree(ctx->scatterfrom->local.slots);}
    VecPipelineDestroy_MPI_General(ctx->scatterfrom);
  }

  if (ctx->custom_pipe_data) {PetscFree(ctx->custom_pipe_data);}
  PetscHeaderDestroy(ctx->scatter);
  PetscFree(ctx);

  return(0);
}

/* >>>> Routines for sequential ordering of processors <<<< */

typedef struct {int rank;} Pipeline_sequential_info;

int ProcYes(int proc,PetscObject pipe_info)
{
  PetscFunctionReturn(1);
}
int ProcNo(int proc,PetscObject pipe_info)
{
  return(0);
}

int ProcUp(int proc,PetscObject pipe_info)
{
  int rank = ((Pipeline_sequential_info *)pipe_info)->rank;

  if (rank<proc) {
    PetscFunctionReturn(1);
  } else {
    return(0);
  }
}

int ProcDown(int proc,PetscObject pipe_info)
{
  int rank = ((Pipeline_sequential_info *)pipe_info)->rank;

  if (rank>proc) {
    PetscFunctionReturn(1);
  } else {
    return(0);
  }
}

int PipelineSequentialSetup(VecPipeline vs,PetscObject x,PetscObject *obj)
{
  Pipeline_sequential_info *info;
  int                      ierr;

  ierr = PetscNew(Pipeline_sequential_info,&info);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(vs->scatter->comm,&(info->rank));CHKERRQ(ierr);
  *obj = (PetscObject)info;

  return(0);
}
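
/*
   With this pipe_data, ProcUp/ProcDown make an upward sweep send only to
   higher-ranked processors and receive only from lower-ranked ones (and the
   reverse for a downward sweep), so data flows through the processors in
   order of increasing MPI rank.
*/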

/* >>>> Routines for multicolor ordering of processors <<<< */

typedef struct {
  int rank,size,*proc_colors;
} Pipeline_colored_info;

int ProcColorUp(int proc,PetscObject pipe_info)
{
  Pipeline_colored_info* comm_info = (Pipeline_colored_info*)pipe_info;
  int                    rank = comm_info->rank;

  if (comm_info->proc_colors[rank]<comm_info->proc_colors[proc]) {
    PetscFunctionReturn(1);
  } else {
    return(0);
  }
}
int ProcColorDown(int proc,PetscObject pipe_info)
{
  Pipeline_colored_info* comm_info = (Pipeline_colored_info*)pipe_info;
  int                    rank = comm_info->rank;

  if (comm_info->proc_colors[rank]>comm_info->proc_colors[proc]) {
    PetscFunctionReturn(1);
  } else {
    return(0);
  }
}

int PipelineRedblackSetup(VecPipeline vs,PetscObject x,PetscObject *obj)
{
  Pipeline_colored_info *info;
  int                   size,i,ierr;

  ierr = PetscNew(Pipeline_colored_info,&info);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(vs->scatter->comm,&(info->rank));CHKERRQ(ierr);
  ierr = MPI_Comm_size(vs->scatter->comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int),&info->proc_colors);CHKERRQ(ierr);
  for (i=0; i<size; i++) {info->proc_colors[i] = i%2;}
  *obj = (PetscObject)info;

  return(0);
}
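
/*
   For example, on four processors this gives the colors {0,1,0,1}: in an
   upward sweep the color-0 (even) processors send to their color-1 (odd)
   neighbours and the odd processors receive, matching the red-black
   description in VecPipelineSetType().
*/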

int PipelineMulticolorSetup(VecPipeline vs,PetscObject x,PetscObject *obj)
{
  Pipeline_colored_info *info;
  Mat                   mat = (Mat) x;
  int                   size,ierr;

  ierr = PetscNew(Pipeline_colored_info,&info);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(mat->comm,&(info->rank));CHKERRQ(ierr);
  ierr = MPI_Comm_size(mat->comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int),&info->proc_colors);CHKERRQ(ierr);
  ierr = PetscMemzero(info->proc_colors,size*sizeof(int));CHKERRQ(ierr);

  /* coloring */
  {
    Mat_MPIAIJ *Aij = (Mat_MPIAIJ*)mat->data;
    int        *owners = Aij->rowners,*touch = Aij->garray;
    int        ntouch = Aij->B->n;
    int        *conn,*colr;
    int        *colors = info->proc_colors,base = info->rank*size;
    int        p,e;

    /* allocate connectivity matrix */
    ierr = PetscMalloc(size*size*sizeof(int),&conn);CHKERRQ(ierr);
    ierr = PetscMalloc(size*sizeof(int),&colr);CHKERRQ(ierr);
    ierr = PetscMemzero(conn,size*size*sizeof(int));CHKERRQ(ierr);

    /* fill in local row of connectivity matrix: walk the sorted list of
       off-processor columns (touch) and mark every processor p whose
       ownership range contains at least one of them */
    p = 0; e = 0;
  loop:
    while (touch[e]>=owners[p+1]) {
      p++;
#if defined(PETSC_DEBUG)
      if (p>=size) SETERRQ(1,"Processor overflow");
#endif
    }
    conn[base+p] = 1;
    if (p==size-1) ;
    else {
      while (touch[e]<owners[p+1]) {
        e++;
        if (e>=ntouch) goto exit;
      }
      goto loop;
    }
  exit:
    /* distribute to establish local copies of full connectivity matrix */
    ierr = MPI_Allgather(conn+base,size,MPI_INT,conn,size,MPI_INT,mat->comm);CHKERRQ(ierr);

    /* greedy coloring: visit the processors in rank order and give each the
       smallest color not used by a lower-ranked processor it connects to */
    base = size;
    /*PetscPrintf(mat->comm,"Coloring: 0->0");*/
    for (p=1; p<size; p++) {
      int q,hi=-1,nc=0;
      ierr = PetscMemzero(colr,size*sizeof(int));CHKERRQ(ierr);
      for (q=0; q<p; q++) { /* inspect colors of all connected previous procs */
        if (conn[base+q] /* should be transposed! */) {
          if (!colr[colors[q]]) {
            nc++;
            colr[colors[q]] = 1;
            if (colors[q]>hi) hi = colors[q];
          }
        }
      }
      /* if the neighbour colors 0..hi are all taken, the new color is nc=hi+1;
         otherwise reuse the smallest free color */
      if (hi+1!=nc) {
        nc = 0;
        while (colr[nc]) nc++;
      }
      colors[p] = nc;
      /*PetscPrintf(mat->comm,",%d->%d",p,colors[p]);*/
      base = base+size;
    }
    /*PetscPrintf(mat->comm,"\n");*/
    ierr = PetscFree(conn);CHKERRQ(ierr);
    ierr = PetscFree(colr);CHKERRQ(ierr);
  }
  *obj = (PetscObject)info;

  return(0);
}
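
/*
   A small worked example of the greedy coloring above: if the processors are
   connected in a chain 0-1-2-3, the loop assigns the colors 0,1,0,1, so the
   multicolor pipeline reduces to the red-black one; denser connectivity
   simply introduces more colors.
*/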

int VecPipelineView(VecPipeline pipe,PetscViewer viewer)
{
  MPI_Comm comm = pipe->comm;
  int      ierr;

  ierr = PetscPrintf(comm,">> Vector Pipeline <<\n");CHKERRQ(ierr);
  if (!pipe->setupcalled) {ierr = PetscPrintf(comm,"Not yet set up\n");CHKERRQ(ierr);}
  ierr = PetscPrintf(comm,"Pipeline type: %d\n",(int)pipe->pipe_type);CHKERRQ(ierr);
  ierr = PetscPrintf(comm,"based on scatter:\n");CHKERRQ(ierr);
  /* VecScatterView(pipe->scatter,viewer);*/
  ierr = PetscPrintf(comm,"Up function %p\n",pipe->upfn);CHKERRQ(ierr);
  ierr = PetscPrintf(comm,"Dn function %p\n",pipe->dnfn);CHKERRQ(ierr);

  return(0);
}