Actual source code: shvec.c

#define PETSCVEC_DLL
/*
   This file contains routines for parallel vector operations that use shared memory.
 */
#include "src/vec/impls/mpi/pvecimpl.h"

/*
   Could not get the include files to work properly on the SGI with
   the C++ compiler.
*/
#if defined(PETSC_USE_SHARED_MEMORY) && !defined(__cplusplus)

EXTERN PetscErrorCode PetscSharedMalloc(MPI_Comm,PetscInt,PetscInt,void**);

PetscErrorCode VecDuplicate_Shared(Vec win,Vec *v)
{
  Vec_MPI     *w = (Vec_MPI*)win->data;
  PetscScalar *array;

  /* first processor allocates entire array and sends its address to the others */
  PetscSharedMalloc(win->comm,win->n*sizeof(PetscScalar),win->N*sizeof(PetscScalar),(void**)&array);

  VecCreate(win->comm,v);
  VecSetSizes(*v,win->n,win->N);
  VecCreate_MPI_Private(*v,w->nghost,array,win->map);

  /* New vector should inherit stashing property of parent */
  (*v)->stash.donotstash = win->stash.donotstash;

  PetscOListDuplicate(win->olist,&(*v)->olist);
  PetscFListDuplicate(win->qlist,&(*v)->qlist);

  if (win->mapping) {
    (*v)->mapping = win->mapping;
    PetscObjectReference((PetscObject)win->mapping);
  }
  (*v)->ops->duplicate = VecDuplicate_Shared;
  (*v)->bs             = win->bs;
  (*v)->bstash.bs      = win->bstash.bs;
  return(0);
}
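
/*
   Illustrative sketch (not part of the library source): how VecDuplicate_Shared()
   is typically reached.  Because the routine above installs itself as
   ops->duplicate, every vector duplicated from a shared vector is also backed
   by the shared-memory arena.  This is only a sketch; error checking is omitted
   and exact calling sequences (e.g. VecDestroy()) may differ between PETSc versions.

      Vec x,y;
      VecCreateShared(PETSC_COMM_WORLD,PETSC_DECIDE,100,&x);
      VecDuplicate(x,&y);        invokes VecDuplicate_Shared() through x->ops->duplicate
      ...
      VecDestroy(y);
      VecDestroy(x);
*/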

PetscErrorCode PETSCVEC_DLLEXPORT VecCreate_Shared(Vec vv)
{
  PetscScalar *array;

  PetscSplitOwnership(vv->comm,&vv->n,&vv->N);
  PetscSharedMalloc(vv->comm,vv->n*sizeof(PetscScalar),vv->N*sizeof(PetscScalar),(void**)&array);

  VecCreate_MPI_Private(vv,0,array,PETSC_NULL);
  vv->ops->duplicate = VecDuplicate_Shared;

  return(0);
}

/* ----------------------------------------------------------------------------------------
     Code to manage shared memory allocation under the SGI with MPI.

     We associate with each communicator a shared memory "arena" from which memory may be allocated.
*/
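
/*
   Illustrative sketch (not part of this file): the per-communicator arena below
   is managed with MPI attribute caching.  A keyval is created once, the arena
   pointer is attached to the communicator with MPI_Attr_put(), later lookups use
   MPI_Attr_get(), and the delete callback registered with MPI_Keyval_create()
   (Petsc_DeleteShared() below) frees the storage when the communicator is
   destroyed.  CreateArenaForComm() is a hypothetical helper used only for this
   sketch; error checking is omitted.

      static int keyval = MPI_KEYVAL_INVALID;
      void       *state;
      int        flag;

      if (keyval == MPI_KEYVAL_INVALID) {
        MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DeleteShared,&keyval,0);
      }
      MPI_Attr_get(comm,keyval,&state,&flag);
      if (!flag) {                          nothing cached yet on this communicator
        state = CreateArenaForComm(comm);   hypothetical helper
        MPI_Attr_put(comm,keyval,state);
      }
*/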

#include "petscsys.h"
#include "petscfix.h"

#if defined(PETSC_HAVE_PWD_H)
#include <pwd.h>
#endif
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#if defined(PETSC_HAVE_UNISTD_H)
#include <unistd.h>
#endif
#if defined(PETSC_HAVE_STDLIB_H)
#include <stdlib.h>
#endif
#if defined(PETSC_HAVE_SYS_PARAM_H)
#include <sys/param.h>
#endif
#if defined(PETSC_HAVE_SYS_UTSNAME_H)
#include <sys/utsname.h>
#endif
#include <fcntl.h>
#include <time.h>
#if defined(PETSC_HAVE_SYS_SYSTEMINFO_H)
#include <sys/systeminfo.h>
#endif
#include "petscfix.h"

static PetscMPIInt Petsc_Shared_keyval = MPI_KEYVAL_INVALID;
static PetscInt    Petsc_Shared_size   = 100000000;

/*
   Private routine to delete internal storage when a communicator is freed.
   This is called by MPI, not by users.

   The binding for the first argument changed from MPI 1.0 to 1.1;
   in 1.0 it was MPI_Comm *comm.
*/
static PetscErrorCode Petsc_DeleteShared(MPI_Comm comm,PetscInt keyval,void *attr_val,void *extra_state)
{
  PetscFree(attr_val);
  PetscFunctionReturn(MPI_SUCCESS);
}

PetscErrorCode PetscSharedMemorySetSize(PetscInt s)
{
  Petsc_Shared_size = s;
  return(0);
}

#include "petscfix.h"
#include <ulocks.h>

PetscErrorCode PetscSharedInitialize(MPI_Comm comm)
{
  PetscMPIInt rank,flag;
  char        filename[PETSC_MAX_PATH_LEN];
  usptr_t     **arena;

  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    /*
       The calling sequence of the second argument to this function changed
       between MPI Standard 1.0 and revision 1.1.  Here we match the new
       standard; if you are using an MPI implementation that follows the
       older version you will get a warning message about the next line.
       It is only a warning message and should do no harm.
    */
    MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DeleteShared,&Petsc_Shared_keyval,0);
  }

  MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
  if (!flag) {
    /* This communicator does not yet have a shared memory arena */
    PetscMalloc(sizeof(usptr_t*),&arena);

    MPI_Comm_rank(comm,&rank);
    if (!rank) {
      PetscStrcpy(filename,"/tmp/PETScArenaXXXXXX");
#ifdef PETSC_HAVE_MKSTEMP
      if (mkstemp(filename) < 0) {
        SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open temporary file %s",filename);
      }
#else
      if (!mktemp(filename)) {
        SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open temporary file %s",filename);
      }
#endif
    }
    MPI_Bcast(filename,PETSC_MAX_PATH_LEN,MPI_CHAR,0,comm);
    PetscOptionsGetInt(PETSC_NULL,"-shared_size",&Petsc_Shared_size,&flag);
    usconfig(CONF_INITSIZE,Petsc_Shared_size);
    *arena = usinit(filename);
    MPI_Attr_put(comm,Petsc_Shared_keyval,arena);
  }

  return(0);
}

PetscErrorCode PetscSharedMalloc(MPI_Comm comm,PetscInt llen,PetscInt len,void **result)
{
  char        *value;
  PetscInt    shift;
  PetscMPIInt rank,flag;
  usptr_t     **arena;

  *result = 0;
  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    PetscSharedInitialize(comm);
  }
  MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
  if (!flag) {
    PetscSharedInitialize(comm);
    MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
    if (!flag) SETERRQ(PETSC_ERR_LIB,"Unable to initialize shared memory");
  }

  /* compute this process's byte offset into the shared block (exclusive prefix sum) */
  MPI_Scan(&llen,&shift,1,MPI_INT,MPI_SUM,comm);
  shift -= llen;

  MPI_Comm_rank(comm,&rank);
  if (!rank) {
    value = (char*)usmalloc((size_t)len,*arena);
    if (!value) {
      (*PetscErrorPrintf)("Unable to allocate shared memory location\n");
      (*PetscErrorPrintf)("Run with option -shared_size <size> \n");
      (*PetscErrorPrintf)("with size > %d \n",(int)(1.2*(Petsc_Shared_size+len)));
      SETERRQ(PETSC_ERR_LIB,"Unable to malloc shared memory");
    }
  }
  /* broadcast the base address allocated on process 0; each process adds its own offset */
  MPI_Bcast(&value,8,MPI_BYTE,0,comm);
  value += shift;

  *result = (void*)value;
  return(0);
}
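
/*
   Worked example (not part of this file) of the offset computation above.
   MPI_Scan with MPI_SUM returns an inclusive prefix sum, so subtracting the
   local contribution yields each process's starting byte offset into the
   single block allocated on process 0.  With local lengths
   llen = {24, 40, 16} bytes on ranks 0, 1, 2:

      inclusive scan:       {24, 64, 80}
      shift = scan - llen:  { 0, 24, 64}

   so rank 0 uses bytes [0,24), rank 1 uses [24,64), and rank 2 uses [64,80)
   of the shared allocation.  Note that the MPI_INT datatype used above limits
   llen and shift to values that fit in an int.
*/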

#else

PetscErrorCode PETSCVEC_DLLEXPORT VecCreate_Shared(Vec vv)
{
  PetscMPIInt size;

  MPI_Comm_size(vv->comm,&size);
  if (size > 1) {
    SETERRQ(PETSC_ERR_SUP_SYS,"Shared memory vector objects are not supported on this machine");
  }
  VecCreate_Seq(vv);
  return(0);
}

#endif

/*@C
   VecCreateShared - Creates a parallel vector that uses shared memory.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the MPI communicator to use
.  n - local vector length (or PETSC_DECIDE to have it calculated if N is given)
-  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)

   Output Parameter:
.  v - the vector

   Notes:
   Currently VecCreateShared() is available only on the SGI; otherwise,
   this routine is the same as VecCreateMPI().

   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector.

   Level: advanced

   Concepts: vectors^creating with shared memory

.seealso: VecCreateSeq(), VecCreate(), VecCreateMPI(), VecDuplicate(), VecDuplicateVecs(),
          VecCreateGhost(), VecCreateMPIWithArray(), VecCreateGhostWithArray()

@*/
PetscErrorCode PETSCVEC_DLLEXPORT VecCreateShared(MPI_Comm comm,PetscInt n,PetscInt N,Vec *v)
{
  VecCreate(comm,v);
  VecSetSizes(*v,n,N);
  VecSetType(*v,VECSHARED);
  return(0);
}
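
/*
   Example usage (a sketch, not part of the library source): create a shared
   vector across all processes and fill the locally owned entries.  Error
   checking is omitted and exact calling sequences (e.g. VecDestroy()) may
   differ between PETSc versions.

      Vec         x;
      PetscScalar *a;
      PetscInt    i,nlocal;

      VecCreateShared(PETSC_COMM_WORLD,PETSC_DECIDE,100,&x);
      VecGetLocalSize(x,&nlocal);
      VecGetArray(x,&a);
      for (i=0; i<nlocal; i++) a[i] = 1.0;
      VecRestoreArray(x,&a);
      VecDestroy(x);
*/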