Actual source code: mpi.h
/*
   This is a special set of bindings for uni-processor use of MPI by the PETSc library.

   NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.
   For example,
   * Does not implement send to self.
   * Does not implement attributes correctly.
*/

/*
   The following information is a response to one of the petsc-maint
   questions regarding MPIUNI.

   MPIUNI was developed with the aim of getting PETSc compiled and usable
   in the absence of a full MPI implementation. With it, we were able to
   provide PETSc on Windows and Windows64 even before any MPI
   implementation was available on these platforms. [Or with certain
   compilers - like Borland - that do not have a usable MPI
   implementation.]

   However, providing a sequential, standards-compliant MPI
   implementation is *not* the goal of MPIUNI. The development strategy
   was to make just enough changes so that the PETSc sources and examples
   compile without errors and run in uni-processor mode. This is why the
   individual functions are not documented.

   PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
   interface is also provided - to get PETSc Fortran examples with a few
   MPI calls working.

   One of the optimizations in MPIUNI is to avoid function call overhead
   when possible. Hence most of the C functions are implemented as
   macros. The function calls cannot be avoided with Fortran usage,
   however.
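
   For instance, with the macro definitions below, a call such as

       MPI_Comm_rank(MPI_COMM_WORLD,&rank);

   expands to the comma expression

       (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (MPI_COMM_WORLD),*(&rank) = 0,MPI_SUCCESS);

   so no function call is generated at all.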

   Most PETSc objects have separate sequential and parallel
   implementations. For example, we have two sparse matrix storage
   formats - SeqAIJ and MPIAIJ. Some MPI routines are used in the Seq
   part, but most of them are used in the MPI part; the send/receive
   calls are found almost exclusively in the MPI part.

   When MPIUNI is used, only the Seq versions of the PETSc objects are
   used, even though the MPI variants of the objects are compiled. Since
   there are no send/receive calls in the Seq variants, PETSc works fine
   with MPIUNI in sequential mode.

   The reason some send/receive functions are defined to abort() is to
   detect sections of code that use send/receive functions but get
   executed in sequential mode (which should not happen in the case of
   PETSc).

   A proper implementation of send/receive would involve writing a
   function for each of them. Inside each such function we would have to
   check whether the send is to self or the receive is from self, and
   then buffer the data accordingly (until the receive is called) - or,
   if a nonblocking receive was posted, do a copy, etc. Handling the
   buffering aspects is complicated enough that at that point one might
   as well use a proper MPI implementation. This is the reason send to
   self is not implemented in MPIUNI, and never will be.

   Proper implementations of MPI [for example MPICH and Open MPI] are
   available for most machines. When these packages are available, it is
   generally preferable to use one of them instead of MPIUNI - even if
   the user is running PETSc sequentially.

   - MPIUNI does not support all MPI functions [or functionality].
     Hence it might not work with external packages or user code that
     have MPI calls in them.

   - MPIUNI is not a standards-compliant implementation for np=1.
     For example, if the user code has a send/recv to self, it will
     abort. [There are similar issues with a number of other MPI
     functionalities.] MPICH and Open MPI, by contrast, are correct
     implementations of the MPI standard for np=1.

   - When user code uses multiple MPI-based packages that have their own
     *internal* stubs equivalent to MPIUNI - in sequential mode, these
     multiple implementations of MPI for np=1 invariably conflict with
     each other. The correct thing to do is to make all such packages
     use the *same* MPI implementation for np=1. MPICH/Open MPI satisfy
     this requirement correctly [and hence are the correct choice].

   - Using MPICH/Open MPI sequentially has minimal disadvantages. [For
     example, these binaries can be run without mpirun/mpiexec as
     ./executable, without requiring any extra configuration of
     ssh/rsh/daemons etc.] This should not be a reason to avoid these
     packages for sequential use.

   Instructions for building standalone MPIUNI [for example linux/gcc+gfortran]:
   - extract include/mpiuni/mpi.h, mpif.h, and src/sys/mpiuni/mpi.c from PETSc
   - remove the reference to petscconf.h from mpi.h
   - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
   - ar cr libmpiuni.a mpi.o
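
   A minimal sanity check of the resulting library (a sketch; the file
   name test.c is arbitrary):

       #include "mpi.h"
       int main(int argc,char **argv)
       {
         int rank;
         MPI_Init(&argc,&argv);
         MPI_Comm_rank(MPI_COMM_WORLD,&rank);
         MPI_Finalize();
         return 0;
       }

   built, for example, with: gcc test.c -I. libmpiuni.a -o test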
*/

#if !defined(__MPIUNI_H)
#define __MPIUNI_H

/* Required by abort() in mpi.c & for win64 */
#include "petscconf.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* require an int variable large enough to hold a pointer */
#if !defined(MPIUNI_INTPTR)
#define MPIUNI_INTPTR long
#endif

/*
   MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
   from generating warning messages about unused variables while compiling PETSc.
*/
extern void *MPIUNI_TMP;

#define MPI_COMM_WORLD       1
#define MPI_COMM_SELF        MPI_COMM_WORLD
#define MPI_COMM_NULL        0
#define MPI_SUCCESS          0
#define MPI_IDENT            0
#define MPI_CONGRUENT        0
#define MPI_SIMILAR          0
#define MPI_UNEQUAL          3
#define MPI_ANY_SOURCE     (-2)
#define MPI_KEYVAL_INVALID   0
#define MPI_ERR_UNKNOWN     18
#define MPI_ERR_INTERN      21
#define MPI_ERR_OTHER        1
#define MPI_TAG_UB           0
#define MPI_ERRORS_RETURN    0
#define MPI_UNDEFINED      (-32766)

/* External types */
typedef int    MPI_Comm;
typedef void   *MPI_Request;
typedef void   *MPI_Group;
typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
typedef char   *MPI_Errhandler;
typedef int    MPI_Fint;
typedef int    MPI_File;
typedef int    MPI_Info;
typedef int    MPI_Offset;

extern int MPIUNI_Memcpy(void*,const void*,int);

/* In order to handle datatypes, we make them into "sizeof(raw-type)";
   this allows us to do the MPIUNI_Memcpy's easily */
#define MPI_Datatype        int
#define MPI_FLOAT           sizeof(float)
#define MPI_DOUBLE          sizeof(double)
#define MPI_LONG_DOUBLE     sizeof(long double)
#define MPI_CHAR            sizeof(char)
#define MPI_BYTE            sizeof(char)
#define MPI_INT             sizeof(int)
#define MPI_LONG            sizeof(long)
#define MPI_LONG_LONG_INT   sizeof(long long)
#define MPI_SHORT           sizeof(short)
#define MPI_UNSIGNED_SHORT  sizeof(unsigned short)
#define MPI_UNSIGNED        sizeof(unsigned)
#define MPI_UNSIGNED_CHAR   sizeof(unsigned char)
#define MPI_UNSIGNED_LONG   sizeof(unsigned long)
#define MPI_COMPLEX         (2*sizeof(float))
#define MPI_DOUBLE_COMPLEX  (2*sizeof(double))
#define MPI_FLOAT_INT       (sizeof(float) + sizeof(int))
#define MPI_DOUBLE_INT      (sizeof(double) + sizeof(int))
#define MPI_LONG_INT        (sizeof(long) + sizeof(int))
#define MPI_SHORT_INT       (sizeof(short) + sizeof(int))
#define MPI_2INT            (2*sizeof(int))
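
/*
   Example of how the sizeof-based datatypes are used by the macros below:
   with MPI_DOUBLE defined as sizeof(double), a call such as
       MPI_Allreduce(sendbuf,recvbuf,3,MPI_DOUBLE,MPI_SUM,comm);
   reduces to MPIUNI_Memcpy(recvbuf,sendbuf,3*sizeof(double)) - a plain
   byte copy of count*datatype bytes.
*/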

#define MPI_REQUEST_NULL     ((MPI_Request)0)
#define MPI_GROUP_NULL       ((MPI_Group)0)
#define MPI_INFO_NULL        ((MPI_Info)0)
#define MPI_BOTTOM           (void *)0
typedef int MPI_Op;

#define MPI_MODE_RDONLY      0
#define MPI_MODE_WRONLY      0
#define MPI_MODE_CREATE      0

#define MPI_SUM              0
#define MPI_MAX              0
#define MPI_MIN              0
#define MPI_ANY_TAG        (-1)
#define MPI_DATATYPE_NULL    0
#define MPI_PACKED           0
#define MPI_MAX_ERROR_STRING 2056
#define MPI_STATUS_IGNORE    (MPI_Status *)1
#define MPI_ORDER_FORTRAN    57
#define MPI_IN_PLACE         (void *) -1

/*
  Prototypes of some functions which are implemented in mpi.c
*/
typedef int  (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
typedef int  (MPI_Delete_function)(MPI_Comm,int,void *,void *);
typedef void (MPI_User_function)(void *,void *,int *,MPI_Datatype *);
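
/*
   A sketch of a user reduction function matching the MPI_User_function
   typedef above (the name user_sum and the use of double data are
   illustrative only):

       static void user_sum(void *in,void *inout,int *len,MPI_Datatype *dtype)
       {
         int i;
         for (i=0; i<*len; i++) ((double *)inout)[i] += ((double *)in)[i];
       }

   Note that with MPIUNI the MPI_Op_create() macro below discards the
   function pointer; reductions are simple memory copies.
*/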

/*
  So that PETSc's MPIUNI can be used with another package that has its own
  MPIUni, we map the following function names to unique PETSc names. These
  functions are defined in mpi.c
*/
#if defined(MPIUNI_AVOID_MPI_NAMESPACE)
#define MPI_Abort         Petsc_MPI_Abort
#define MPI_Attr_get      Petsc_MPI_Attr_get
#define MPI_Keyval_free   Petsc_MPI_Keyval_free
#define MPI_Attr_put      Petsc_MPI_Attr_put
#define MPI_Attr_delete   Petsc_MPI_Attr_delete
#define MPI_Keyval_create Petsc_MPI_Keyval_create
#define MPI_Comm_free     Petsc_MPI_Comm_free
#define MPI_Comm_dup      Petsc_MPI_Comm_dup
#define MPI_Comm_create   Petsc_MPI_Comm_create
#define MPI_Init          Petsc_MPI_Init
#define MPI_Finalize      Petsc_MPI_Finalize
#define MPI_Initialized   Petsc_MPI_Initialized
#define MPI_Finalized     Petsc_MPI_Finalized
#endif

extern int MPI_Abort(MPI_Comm,int);
extern int MPI_Attr_get(MPI_Comm,int,void *,int *);
extern int MPI_Keyval_free(int *);
extern int MPI_Attr_put(MPI_Comm,int,void *);
extern int MPI_Attr_delete(MPI_Comm,int);
extern int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
extern int MPI_Comm_free(MPI_Comm *);
extern int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
extern int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
extern int MPI_Init(int *,char ***);
extern int MPI_Finalize(void);
extern int MPI_Initialized(int *);
extern int MPI_Finalized(int *);

#define MPI_Aint MPIUNI_INTPTR
/*
  Routines we have replaced with macros that do nothing.
  Some return error codes; others return success.
*/

#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)

#define MPI_Send(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Get_count(status,datatype,count) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Buffer_attach(buffer,size) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
       MPI_SUCCESS)
#define MPI_Buffer_detach(buffer,size) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
       MPI_SUCCESS)
#define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Wait(request,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPI_SUCCESS)
#define MPI_Test(request,flag,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       *(flag) = 0,\
       MPI_SUCCESS)
#define MPI_Request_free(request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Waitany(a,b,c,d) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
       MPI_SUCCESS)
#define MPI_Testany(a,b,c,d,e) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
       MPI_SUCCESS)
#define MPI_Waitall(count,array_of_requests,array_of_statuses) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
       MPI_SUCCESS)
#define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
       MPI_SUCCESS)
#define MPI_Waitsome(incount,array_of_requests,outcount,array_of_indices,array_of_statuses) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
       MPI_SUCCESS)
#define MPI_Comm_group(comm,group) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPI_SUCCESS)
#define MPI_Group_incl(group,n,ranks,newgroup) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
       MPI_SUCCESS)
#define MPI_Testsome(incount,array_of_requests,outcount,array_of_indices,array_of_statuses) MPI_SUCCESS
#define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0,MPI_SUCCESS)
#define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
#define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
#define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
       MPI_SUCCESS)
#define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Startall(count,array_of_requests) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
       MPI_SUCCESS)
#define MPI_Op_create(function,commute,op) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
       MPI_SUCCESS)
#define MPI_Op_free(op) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
       MPI_SUCCESS)
/* Need to determine sizeof "sendtype" */
#define MPI_Sendrecv(sendbuf,sendcount,sendtype,dest,sendtag,recvbuf,recvcount,recvtype,source,recvtag,comm,status) \
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype))
#define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,source,recvtag,comm,status) MPI_SUCCESS
#define MPI_Type_contiguous(count,oldtype,newtype) \
      (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
#define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_indexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_hindexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_struct(count,array_of_blocklengths,array_of_displacements,array_of_types,newtype) MPI_SUCCESS
#define MPI_Address(location,address) \
      (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
#define MPI_Type_extent(datatype,extent) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_size(datatype,size) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_lb(datatype,displacement) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_ub(datatype,displacement) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_commit(datatype) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPI_SUCCESS)
#define MPI_Type_free(datatype) MPI_SUCCESS
#define MPI_Get_elements(status,datatype,count) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack(inbuf,incount,datatype,outbuf,outsize,position,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Unpack(inbuf,insize,position,outbuf,outcount,datatype,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack_size(incount,datatype,comm,size) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Barrier(comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Bcast(buffer,count,datatype,root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Gather(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Gatherv(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs,recvtype,root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Scatter(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,root,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Scatterv(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcount,recvtype,root,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*(recvtype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPI_SUCCESS)
#define MPI_Allgather(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Allgatherv(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs,recvtype,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       (sendbuf != MPI_IN_PLACE) ? MPIUNI_Memcpy((recvbuf),(sendbuf),(sendcount)*(sendtype)) : 0,\
       MPI_SUCCESS)
#define MPI_Alltoall(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*(sendtype)),\
       MPI_SUCCESS)
#define MPI_Alltoallv(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts,rdispls,recvtype,comm) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Alltoallw(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts,rdispls,recvtypes,comm) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Reduce(sendbuf,recvbuf,count,datatype,op,root,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Allreduce(sendbuf,recvbuf,count,datatype,op,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Scan(sendbuf,recvbuf,count,datatype,op,comm) \
      (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*(datatype)),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,datatype,op,comm) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
#define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
      MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Group_compare(group1,group2,result) (*(result)=1,MPI_SUCCESS)
#define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
#define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_free(group) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
       MPI_SUCCESS)
#define MPI_Comm_size(comm,size) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       *(size)=1,\
       MPI_SUCCESS)
#define MPI_Comm_rank(comm,rank) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       *(rank)=0,\
       MPI_SUCCESS)
#define MPI_Comm_compare(comm1,comm2,result) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
       *(result)=MPI_IDENT,\
       MPI_SUCCESS)
#define MPI_Comm_split(comm,color,key,newcomm) MPI_SUCCESS
#define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
#define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
#define MPI_Intercomm_create(local_comm,local_leader,peer_comm,remote_leader,tag,newintercomm) MPI_SUCCESS
#define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS

#define MPI_Topo_test(comm,status) MPI_SUCCESS
#define MPI_Cart_create(comm_old,ndims,dims,periods,reorder,comm_cart) MPI_SUCCESS
#define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
#define MPI_Graphdims_get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_get(comm,maxdims,dims,periods,coords) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_coords(comm,rank,maxdims,coords) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors_count(comm,rank,nneighbors) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Get_processor_name(name,result_len) \
      (MPIUNI_Memcpy(name,"localhost",10*sizeof(char)),*(result_len) = 9,MPI_SUCCESS)
#define MPI_Errhandler_create(function,errhandler) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
       MPI_SUCCESS)
#define MPI_Errhandler_set(comm,errhandler) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
       MPI_SUCCESS)
#define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
#define MPI_Errhandler_free(errhandler) MPI_SUCCESS
#define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
#define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
#define MPI_Wtick() 1.0
#define MPI_Wtime() 0.0
#define MPI_Pcontrol(level) MPI_SUCCESS

#define MPI_NULL_COPY_FN   0
#define MPI_NULL_DELETE_FN 0

/* MPI-IO additions */

#define MPI_File_open(comm,filename,amode,info,mpi_fh) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filename),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (amode),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
       MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_close(mpi_fh) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
       MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (disp),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (etype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filetype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datarep),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info),\
       MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_Type_get_extent(datatype,lb,extent) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (lb),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent),\
       MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
       MPI_Abort(MPI_COMM_WORLD,0))

/* called from PetscInitialize() - so return success */
#define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (name),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (read_conv_fn),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (write_conv_fn),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent_fn),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (state),\
       MPI_SUCCESS)

#define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
      (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ndims),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_sizes),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_subsizes),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_starts),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (order),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype),\
       MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype),\
       MPI_Abort(MPI_COMM_WORLD,0))

#if defined(__cplusplus)
}
#endif

#endif /* __MPIUNI_H */