Hi,

The attached patch corrects trivial typos in man files and
in FUNC_NAME variables in the ompi/mpi/c/*.c files.

One note which may not be trivial:
Before MPI-2.1, the MPI standard said that MPI_PACKED should
be used for MPI_{Pack,Unpack}_external. In MPI-2.1, this was
changed to MPI_BYTE. See 'B.3 Changes from Version 2.0 to
Version 2.1' (page 766) in MPI-3.0.
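
For illustration only (not part of the patch), a minimal
sketch of the sender-side usage the updated man pages
describe; the buffer size, packed value, destination rank,
and tag here are arbitrary:

    char buf[1000];
    MPI_Aint position = 0;
    int i = 7;
    /* Pack into the portable "external32" representation,
       then send the packed bytes as MPI_BYTE, as required
       since MPI-2.1 (MPI_PACKED before that). */
    MPI_Pack_external("external32", &i, 1, MPI_INT,
                      buf, sizeof(buf), &position);
    MPI_Send(buf, (int)position, MPI_BYTE, 1, 0,
             MPI_COMM_WORLD);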

Though my patch is against OMPI trunk, I would like to see
these corrections in the 1.8 series as well.

Takahiro Kawashima,
MPI development team,
Fujitsu
Index: ompi/mpi/c/message_c2f.c
===================================================================
--- ompi/mpi/c/message_c2f.c	(revision 32173)
+++ ompi/mpi/c/message_c2f.c	(working copy)
@@ -35,7 +35,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Message_f2c";
+static const char FUNC_NAME[] = "MPI_Message_c2f";
 
 
 MPI_Fint MPI_Message_c2f(MPI_Message message) 
Index: ompi/mpi/c/get_accumulate.c
===================================================================
--- ompi/mpi/c/get_accumulate.c	(revision 32173)
+++ ompi/mpi/c/get_accumulate.c	(working copy)
@@ -41,7 +41,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Get_accumlate";
+static const char FUNC_NAME[] = "MPI_Get_accumulate";
 
 int MPI_Get_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
                        void *result_addr, int result_count, MPI_Datatype result_datatype,
Index: ompi/mpi/c/rget_accumulate.c
===================================================================
--- ompi/mpi/c/rget_accumulate.c	(revision 32173)
+++ ompi/mpi/c/rget_accumulate.c	(working copy)
@@ -42,7 +42,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Rget_accumlate";
+static const char FUNC_NAME[] = "MPI_Rget_accumulate";
 
 int MPI_Rget_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
                         void *result_addr, int result_count, MPI_Datatype result_datatype,
Index: ompi/mpi/c/request_c2f.c
===================================================================
--- ompi/mpi/c/request_c2f.c	(revision 32173)
+++ ompi/mpi/c/request_c2f.c	(working copy)
@@ -35,7 +35,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Request_f2c";
+static const char FUNC_NAME[] = "MPI_Request_c2f";
 
 
 MPI_Fint MPI_Request_c2f(MPI_Request request) 
Index: ompi/mpi/c/raccumulate.c
===================================================================
--- ompi/mpi/c/raccumulate.c	(revision 32173)
+++ ompi/mpi/c/raccumulate.c	(working copy)
@@ -41,7 +41,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Accumlate";
+static const char FUNC_NAME[] = "MPI_Raccumulate";
 
 int MPI_Raccumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
                    int target_rank, MPI_Aint target_disp, int target_count,
Index: ompi/mpi/c/unpack_external.c
===================================================================
--- ompi/mpi/c/unpack_external.c	(revision 32173)
+++ ompi/mpi/c/unpack_external.c	(working copy)
@@ -37,7 +37,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Unpack_external ";
+static const char FUNC_NAME[] = "MPI_Unpack_external";
 
 
 int MPI_Unpack_external (const char datarep[], const void *inbuf, MPI_Aint insize,
Index: ompi/mpi/c/comm_size.c
===================================================================
--- ompi/mpi/c/comm_size.c	(revision 32173)
+++ ompi/mpi/c/comm_size.c	(working copy)
@@ -35,7 +35,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_comm_size";
+static const char FUNC_NAME[] = "MPI_Comm_size";
 
 
 int MPI_Comm_size(MPI_Comm comm, int *size) 
Index: ompi/mpi/c/get_library_version.c
===================================================================
--- ompi/mpi/c/get_library_version.c	(revision 32173)
+++ ompi/mpi/c/get_library_version.c	(working copy)
@@ -31,7 +31,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_get_library_version";
+static const char FUNC_NAME[] = "MPI_Get_library_version";
 
 
 int MPI_Get_library_version(char *version, int *resultlen) 
Index: ompi/mpi/c/ireduce_scatter_block.c
===================================================================
--- ompi/mpi/c/ireduce_scatter_block.c	(revision 32173)
+++ ompi/mpi/c/ireduce_scatter_block.c	(working copy)
@@ -39,7 +39,7 @@
 #include "ompi/mpi/c/profile/defines.h"
 #endif
 
-static const char FUNC_NAME[] = "MPI_Reduce_scatter_block";
+static const char FUNC_NAME[] = "MPI_Ireduce_scatter_block";
 
 
 int MPI_Ireduce_scatter_block(const void *sendbuf, void *recvbuf, int recvcount,
Index: ompi/mpi/man/man3/MPI_Pack_external.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Pack_external.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Pack_external.3in	(working copy)
@@ -113,10 +113,10 @@
 		    buf, 1000, &position); 
 		MPI_Pack_external("external32", &msg, i, MPI_DOUBLE,
 		    buf, 1000, &position); 
-		MPI_Send(buf, position, MPI_PACKED, 1, 0,
+		MPI_Send(buf, position, MPI_BYTE, 1, 0,
 		    MPI_COMM_WORLD); 
 	} else {		/* RECEIVER CODE */ 
-		MPI_Recv(buf, 1, MPI_PACKED, 0, 0, MPI_COMM_WORLD,
+		MPI_Recv(buf, 1, MPI_BYTE, 0, 0, MPI_COMM_WORLD,
 		    MPI_STATUS_IGNORE);
 		MPI_Unpack_external("external32", buf, 1000, 
 		    MPI_INT, &i, 1, &position);
@@ -150,12 +150,12 @@
 send call with a send buffer that is the "concatenation" of the
 individual send buffers.
 .sp
-A packing unit can be sent using type MPI_PACKED. Any point-to-point
+A packing unit can be sent using type MPI_BYTE. Any point-to-point
 or collective communication function can be used to move the sequence
 of bytes that forms the packing unit from one process to another. This
 packing unit can now be received using any receive operation, with any
 datatype. (The type-matching rules are relaxed for messages sent with
-type MPI_PACKED.)
+type MPI_BYTE.)
 .sp
 A packing unit can be unpacked into several successive messages. This
 is effected by several successive related calls to
Index: ompi/mpi/man/man3/MPI_Exscan.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Exscan.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Exscan.3in	(working copy)
@@ -19,7 +19,7 @@
 
 int MPI_Iexscan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP,
 	MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP,
-        MPI_Request \fI*request\fP)
+	MPI_Request \fI*request\fP)
 
 .fi
 .SH Fortran Syntax
Index: ompi/mpi/man/man3/MPI_Bcast.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Bcast.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Bcast.3in	(working copy)
@@ -90,7 +90,7 @@
 .sp
 As in many of our sample code fragments, we assume that some of the variables (such as comm in the example above) have been assigned appropriate values.
 .sp
-WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR
+.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR
 .sp
 When the communicator is an inter-communicator, the root process in the first group broadcasts data to all the processes in the second group.  The first group defines the root process.  That process uses MPI_ROOT as the value of its \fIroot\fR argument.  The remaining processes use MPI_PROC_NULL as the value of their \fIroot\fR argument.  All processes in the second group use the rank of that root process in the first group as the value of their \fIroot\fR argument.   The receive buffer arguments of the processes in the second group must be consistent with the send buffer argument of the root process in the first group.
 .sp  
Index: ompi/mpi/man/man3/MPI_Reduce_scatter_block.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Reduce_scatter_block.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Reduce_scatter_block.3in	(working copy)
@@ -68,7 +68,7 @@
 .ft R
 
 MPI_Reduce_scatter_block first does an element-wise reduction on vector of \fIcount\fP\
- =\ n * \fIrevcount\fP elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and
+ =\ n * \fIrecvcount\fP elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and
 \fIdatatype\fP, using the operation \fIop\fP, where n is the number of
 processes in the group of \fIcomm\fP. Next, the resulting vector of results is split into n disjoint
 segments, where n is the number of processes in the group. Each segments contains \fIrecvcount\fP
Index: ompi/mpi/man/man3/MPI_Gather.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Gather.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Gather.3in	(working copy)
@@ -30,7 +30,7 @@
 	INTEGER	\fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP
 	INTEGER	\fICOMM, IERROR\fP 
 
-MPI_GATHER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
+MPI_IGATHER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
 		RECVTYPE, ROOT, COMM, REQUEST, IERROR\fP)
 	<type>	\fISENDBUF(*), RECVBUF(*)\fP
 	INTEGER	\fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP
Index: ompi/mpi/man/man3/MPI_Reduce_scatter.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Reduce_scatter.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Reduce_scatter.3in	(working copy)
@@ -77,7 +77,7 @@
 .ft R
 
 MPI_Reduce_scatter first does an element-wise reduction on vector of \fIcount\fP\
- =\ S(i)\fIrevcounts\fP[i] elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and
+ =\ S(i)\fIrecvcounts\fP[i] elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and
 \fIdatatype\fP. Next, the resulting vector of results is split into n disjoint
 segments, where n is the number of processes in the group. Segment i contains
 \fIrecvcounts\fP[i] elements. The ith segment is sent to process i and stored in
Index: ompi/mpi/man/man3/MPI_Allreduce.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Allreduce.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Allreduce.3in	(working copy)
@@ -27,7 +27,7 @@
 	<type>	\fISENDBUF\fP(*), \fIRECVBUF\fP(*)
 	INTEGER	\fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP,\fI COMM\fP,\fI IERROR\fP
 
-MPI_ALLREDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP)
+MPI_IALLREDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP)
 	<type>	\fISENDBUF\fP(*)\fI, RECVBUF\fP(*)
 	INTEGER	\fICOUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP
 
Index: ompi/mpi/man/man3/MPI_Unpack_external.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Unpack_external.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Unpack_external.3in	(working copy)
@@ -127,12 +127,12 @@
 send call with a send buffer that is the "concatenation" of the
 individual send buffers.
 .sp
-A packing unit can be sent using type MPI_PACKED. Any point-to-point
+A packing unit can be sent using type MPI_BYTE. Any point-to-point
 or collective communication function can be used to move the sequence
 of bytes that forms the packing unit from one process to another. This
 packing unit can now be received using any receive operation, with any
 datatype: The type-matching rules are relaxed for messages sent with
-type MPI_PACKED.
+type MPI_BYTE.
 .sp
 A packing unit can be unpacked into several successive messages. This
 is effected by several successive related calls to
Index: ompi/mpi/man/man3/MPI_Mprobe.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Mprobe.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Mprobe.3in	(working copy)
@@ -56,8 +56,8 @@
 
 .SH DESCRIPTION
 .ft R
-Like MPI_Probe and MPI_Iprobe, the MPI_Mprobe and MPI_Improbe opera-
-tions allow incoming messages to be queried without actually receiving
+Like MPI_Probe and MPI_Iprobe, the MPI_Mprobe and MPI_Improbe operations
+allow incoming messages to be queried without actually receiving
 them, except that MPI_Mprobe and MPI_Improbe provide a mechanism to
 receive the specific message that was matched regardless of other
 intervening probe or receive operations.  This gives the application
Index: ompi/mpi/man/man3/MPI_Improbe.3in
===================================================================
--- ompi/mpi/man/man3/MPI_Improbe.3in	(revision 32173)
+++ ompi/mpi/man/man3/MPI_Improbe.3in	(working copy)
@@ -61,8 +61,8 @@
 
 .SH DESCRIPTION
 .ft R
-Like MPI_Probe and MPI_Iprobe, the MPI_Mprobe and MPI_Improbe opera-
-tions allow incoming messages to be queried without actually receiving
+Like MPI_Probe and MPI_Iprobe, the MPI_Mprobe and MPI_Improbe operations
+allow incoming messages to be queried without actually receiving
 them, except that MPI_Mprobe and MPI_Improbe provide a mechanism to
 receive the specific message that was matched regardless of other
 intervening probe or receive operations.  This gives the application
