Actual source code: shvec.c
/*$Id: shvec.c,v 1.53 2001/09/07 20:09:02 bsmith Exp $*/
/*
   This file contains routines for parallel vector operations that use shared memory
 */
#include "src/vec/impls/mpi/pvecimpl.h"
/*
     Could not get the include files to work properly on the SGI with
  the C++ compiler.
*/
#if defined(PETSC_USE_SHARED_MEMORY) && !defined(__cplusplus)

EXTERN int PetscSharedMalloc(MPI_Comm,int,int,void**);
int VecDuplicate_Shared(Vec win,Vec *v)
{
  int         ierr;
  Vec_MPI     *w = (Vec_MPI *)win->data;
  PetscScalar *array;

  PetscFunctionBegin;

  /* first processor allocates entire array and sends its address to the others */
  ierr = PetscSharedMalloc(win->comm,win->n*sizeof(PetscScalar),win->N*sizeof(PetscScalar),(void**)&array);CHKERRQ(ierr);

  ierr = VecCreate(win->comm,v);CHKERRQ(ierr);
  ierr = VecSetSizes(*v,win->n,win->N);CHKERRQ(ierr);
  ierr = VecCreate_MPI_Private(*v,w->nghost,array,win->map);CHKERRQ(ierr);

  /* New vector should inherit stashing property of parent */
  (*v)->stash.donotstash = win->stash.donotstash;

  ierr = PetscOListDuplicate(win->olist,&(*v)->olist);CHKERRQ(ierr);
  ierr = PetscFListDuplicate(win->qlist,&(*v)->qlist);CHKERRQ(ierr);

  if (win->mapping) {
    (*v)->mapping = win->mapping;
    ierr = PetscObjectReference((PetscObject)win->mapping);CHKERRQ(ierr);
  }
  (*v)->ops->duplicate = VecDuplicate_Shared;
  (*v)->bs             = win->bs;
  (*v)->bstash.bs      = win->bstash.bs;
  PetscFunctionReturn(0);
}
EXTERN_C_BEGIN
int VecCreate_Shared(Vec vv)
{
  int         ierr;
  PetscScalar *array;

  PetscFunctionBegin;
  ierr = PetscSplitOwnership(vv->comm,&vv->n,&vv->N);CHKERRQ(ierr);
  ierr = PetscSharedMalloc(vv->comm,vv->n*sizeof(PetscScalar),vv->N*sizeof(PetscScalar),(void**)&array);CHKERRQ(ierr);

  ierr = VecCreate_MPI_Private(vv,0,array,PETSC_NULL);CHKERRQ(ierr);
  vv->ops->duplicate = VecDuplicate_Shared;

  PetscFunctionReturn(0);
}
EXTERN_C_END
/* ----------------------------------------------------------------------------------------
     Code to manage shared memory allocation under the SGI with MPI

  We associate with a communicator a shared memory "arena" from which memory may be shmalloced.
*/
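/*
   The allocator below caches the arena pointer on the communicator itself, using an MPI
   attribute keyval, so that each communicator gets at most one arena.  As a point of
   reference, a minimal standalone sketch of that attribute-caching pattern -- using only
   standard MPI-1 calls and hypothetical names (demo_keyval, Demo_Delete, DemoGetState),
   none of which are part of PETSc -- might look like the following.  It is illustrative
   only and is not compiled as part of this file.
*/
#if 0
#include <mpi.h>
#include <stdlib.h>

static int demo_keyval = MPI_KEYVAL_INVALID;

/* called by MPI when a communicator carrying the attribute is freed */
static int Demo_Delete(MPI_Comm comm,int keyval,void *attr_val,void *extra_state)
{
  free(attr_val);
  return MPI_SUCCESS;
}

/* return the per-communicator state, creating and caching it on first use */
static int DemoGetState(MPI_Comm comm,void **state)
{
  int flag;

  if (demo_keyval == MPI_KEYVAL_INVALID) {
    MPI_Keyval_create(MPI_NULL_COPY_FN,Demo_Delete,&demo_keyval,0);
  }
  MPI_Attr_get(comm,demo_keyval,state,&flag);
  if (!flag) {
    *state = malloc(64);                     /* placeholder for the real arena setup */
    MPI_Attr_put(comm,demo_keyval,*state);
  }
  return 0;
}
#endif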
#include "petscsys.h"
#include "petscfix.h"
#if defined(PETSC_HAVE_PWD_H)
#include <pwd.h>
#endif
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#if defined(PETSC_HAVE_UNISTD_H)
#include <unistd.h>
#endif
#if defined(PETSC_HAVE_STDLIB_H)
#include <stdlib.h>
#endif
#if !defined(PARCH_win32)
#include <sys/param.h>
#include <sys/utsname.h>
#endif
#if defined(PARCH_win32)
#include <windows.h>
#include <io.h>
#include <direct.h>
#endif
#if defined (PARCH_win32_gnu)
#include <windows.h>
#endif
#include <fcntl.h>
#include <time.h>
#if defined(PETSC_HAVE_SYS_SYSTEMINFO_H)
#include <sys/systeminfo.h>
#endif
#include "petscfix.h"
static int Petsc_Shared_keyval = MPI_KEYVAL_INVALID;
static int Petsc_Shared_size   = 100000000;
/*
   Private routine to delete internal storage when a communicator is freed.
  This is called by MPI, not by users.

  The binding for the first argument changed from MPI 1.0 to 1.1; in 1.0
  it was MPI_Comm *comm.
*/
static int Petsc_DeleteShared(MPI_Comm comm,int keyval,void* attr_val,void* extra_state)
{
  int ierr;

  PetscFunctionBegin;
  ierr = PetscFree(attr_val);CHKERRQ(ierr);
  PetscFunctionReturn(MPI_SUCCESS);
}
int PetscSharedMemorySetSize(int s)
{
  PetscFunctionBegin;
  Petsc_Shared_size = s;
  PetscFunctionReturn(0);
}
#include "petscfix.h"

#include <ulocks.h>
int PetscSharedInitialize(MPI_Comm comm)
{
  int     rank,len,ierr,flag;
  char    filename[256];
  usptr_t **arena;

  PetscFunctionBegin;

  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    /*
       The calling sequence of the 2nd argument to this function changed
       between MPI Standard 1.0 and the revisions 1.1; here we match the
       new standard.  If you are using an MPI implementation that uses
       the older version you will get a warning message about the next line;
       it is only a warning message and should do no harm.
    */
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DeleteShared,&Petsc_Shared_keyval,0);CHKERRQ(ierr);
  }

  ierr = MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);CHKERRQ(ierr);

  if (!flag) {
    /* This communicator does not yet have a shared memory arena */
    ierr = PetscMalloc(sizeof(usptr_t*),&arena);CHKERRQ(ierr);

    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
    if (!rank) {
      ierr = PetscStrcpy(filename,"/tmp/PETScArenaXXXXXX");CHKERRQ(ierr);
#ifdef PETSC_HAVE_MKSTEMP
      if (mkstemp(filename) < 0) {
        SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open temporary file %s",filename);
      }
#else
      if (mktemp(filename) == PETSC_NULL) {
        SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open temporary file %s",filename);
      }
#endif
      ierr = PetscStrlen(filename,&len);CHKERRQ(ierr);
    }
    ierr = MPI_Bcast(&len,1,MPI_INT,0,comm);CHKERRQ(ierr);
    ierr = MPI_Bcast(filename,len+1,MPI_CHAR,0,comm);CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(PETSC_NULL,"-shared_size",&Petsc_Shared_size,&flag);CHKERRQ(ierr);
    usconfig(CONF_INITSIZE,Petsc_Shared_size);
    *arena = usinit(filename);
    ierr   = MPI_Attr_put(comm,Petsc_Shared_keyval,arena);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
int PetscSharedMalloc(MPI_Comm comm,int llen,int len,void **result)
{
  char    *value;
  int     ierr,shift,rank,flag;
  usptr_t **arena;

  PetscFunctionBegin;

  *result = 0;
  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    ierr = PetscSharedInitialize(comm);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);CHKERRQ(ierr);
  if (!flag) {
    ierr = PetscSharedInitialize(comm);CHKERRQ(ierr);
    ierr = MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);CHKERRQ(ierr);
    if (!flag) SETERRQ(1,"Unable to initialize shared memory");
  }

  ierr   = MPI_Scan(&llen,&shift,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  shift -= llen;

  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) {
    value = (char*)usmalloc((size_t)len,*arena);
    if (!value) {
      (*PetscErrorPrintf)("PETSC ERROR: Unable to allocate shared memory location\n");
      (*PetscErrorPrintf)("PETSC ERROR: Run with option -shared_size <size>\n");
      (*PetscErrorPrintf)("PETSC ERROR: with size > %d\n",(int)(1.2*(Petsc_Shared_size+len)));
      SETERRQ(1,"Unable to malloc shared memory");
    }
  }
  ierr   = MPI_Bcast(&value,8,MPI_BYTE,0,comm);CHKERRQ(ierr);
  value += shift;

  *result = (void*)value;
  PetscFunctionReturn(0);
}
#else
EXTERN_C_BEGIN
extern int VecCreate_Seq(Vec);
EXTERN_C_END

EXTERN_C_BEGIN
int VecCreate_Shared(Vec vv)
{
  int ierr,size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(vv->comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    SETERRQ(1,"Not supported for shared memory vector objects on this machine");
  }
  ierr = VecCreate_Seq(vv);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END
#endif
/*@C
   VecCreateShared - Creates a parallel vector that uses shared memory.

   Collective on MPI_Comm

   Input Parameters:
.  comm - the MPI communicator to use
.  n - local vector length (or PETSC_DECIDE to have it calculated if N is given)
.  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)

   Output Parameter:
.  v - the vector

   Notes:
   Currently VecCreateShared() is available only on the SGI; otherwise,
   this routine is the same as VecCreateMPI().

   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector.

   Level: advanced

   Concepts: vectors^creating with shared memory

.seealso: VecCreateSeq(), VecCreate(), VecCreateMPI(), VecDuplicate(), VecDuplicateVecs(),
          VecCreateGhost(), VecCreateMPIWithArray(), VecCreateGhostWithArray()

@*/
int VecCreateShared(MPI_Comm comm,int n,int N,Vec *v)
{
  int ierr;

  PetscFunctionBegin;
  ierr = VecCreate(comm,v);CHKERRQ(ierr);
  ierr = VecSetSizes(*v,n,N);CHKERRQ(ierr);
  ierr = VecSetType(*v,VECSHARED);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
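
/*
   Example usage (a minimal sketch, not part of shvec.c): how a caller might create a
   shared-memory vector with VecCreateShared() and then obtain another vector of the
   same type with VecDuplicate(), as the manual page above suggests.  The global length
   of 100 is arbitrary, and error handling follows the usual ierr/CHKERRQ convention.
   The block is illustrative only and is not compiled as part of this file.
*/
#if 0
#include "petscvec.h"

int main(int argc,char **argv)
{
  Vec x,y;
  int ierr;

  ierr = PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);

  /* let PETSc choose the local sizes; global length 100 */
  ierr = VecCreateShared(PETSC_COMM_WORLD,PETSC_DECIDE,100,&x);CHKERRQ(ierr);

  /* additional vectors of the same (shared) type come from VecDuplicate() */
  ierr = VecDuplicate(x,&y);CHKERRQ(ierr);

  ierr = VecDestroy(y);CHKERRQ(ierr);
  ierr = VecDestroy(x);CHKERRQ(ierr);
  ierr = PetscFinalize();CHKERRQ(ierr);
  return 0;
}
#endif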