本文整理汇总了C++中MPIR_Err_create_code函数的典型用法代码示例。如果您正苦于以下问题:C++ MPIR_Err_create_code函数的具体用法?C++ MPIR_Err_create_code怎么用?C++ MPIR_Err_create_code使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MPIR_Err_create_code函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: start
//.........这里部分代码省略.........
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
.seealso: MPI_Pack, MPI_Pack_size
@*/
/* MPI_Unpack - unpack a datatype-described message from a contiguous
 * buffer previously filled by MPI_Pack.  On success *position is
 * advanced past the bytes consumed from inbuf.  Errors are routed
 * through the communicator's error handler via MPIR_Err_return_comm. */
int MPI_Unpack(const void *inbuf, int insize, int *position,
void *outbuf, int outcount, MPI_Datatype datatype,
MPI_Comm comm)
{
int mpi_errno = MPI_SUCCESS;
MPI_Aint position_x; /* wide (MPI_Aint) copy of *position for the impl call */
MPID_Comm *comm_ptr = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_UNPACK);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_UNPACK);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
# endif
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr(comm, comm_ptr);
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* inbuf may legitimately be NULL when there is nothing to unpack */
if (insize > 0) {
MPIR_ERRTEST_ARGNULL(inbuf, "input buffer", mpi_errno);
}
/* Note: outbuf could be MPI_BOTTOM; don't test for NULL */
MPIR_ERRTEST_COUNT(insize, mpi_errno);
MPIR_ERRTEST_COUNT(outcount, mpi_errno);
/* Validate comm_ptr */
MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
/* If comm_ptr is not valid, it will be reset to null */
MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
/* Derived (non-builtin) datatypes additionally need a valid,
 * committed datatype object behind the handle. */
if (datatype != MPI_DATATYPE_NULL &&
HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype *datatype_ptr = NULL;
MPID_Datatype_get_ptr(datatype, datatype_ptr);
MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno);
}
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
position_x = *position;
mpi_errno = MPIR_Unpack_impl(inbuf, insize, &position_x, outbuf, outcount, datatype);
if (mpi_errno) goto fn_fail;
/* Truncate the wide position back into the caller's int. */
MPIU_Assign_trunc(*position, position_x, int);
/* ... end of body of routine ... */
fn_exit:
MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_UNPACK);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
# ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_unpack",
"**mpi_unpack %p %d %p %p %d %D %C", inbuf, insize, position, outbuf, outcount, datatype, comm);
}
# endif
mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例2: group1
/*@
MPI_Group_translate_ranks - Translates the ranks of processes in one group to
those in another group
Input Parameters:
+ group1 - group1 (handle)
. n - number of ranks in 'ranks1' and 'ranks2' arrays (integer)
. ranks1 - array of zero or more valid ranks in 'group1'
- group2 - group2 (handle)
Output Parameters:
. ranks2 - array of corresponding ranks in group2, 'MPI_UNDEFINED' when no
correspondence exists.
As a special case (see the MPI-2 errata), if the input rank is
'MPI_PROC_NULL', 'MPI_PROC_NULL' is given as the output rank.
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
@*/
int MPI_Group_translate_ranks(MPI_Group group1, int n, const int ranks1[],
MPI_Group group2, int ranks2[])
{
int mpi_errno = MPI_SUCCESS;
MPIR_Group *group_ptr1 = NULL;
MPIR_Group *group_ptr2 = NULL;
MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_GROUP_TRANSLATE_RANKS);
MPIR_ERRTEST_INITIALIZED_ORDIE();
/* The routines that setup the group data structures must be executed
* within a mutex. As most of the group routines are not performance
* critical, we simple run these routines within the SINGLE_CS */
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_GROUP_TRANSLATE_RANKS);
/* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_GROUP(group1, mpi_errno);
MPIR_ERRTEST_GROUP(group2, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif
/* Convert MPI object handles to object pointers */
MPIR_Group_get_ptr(group1, group_ptr1);
MPIR_Group_get_ptr(group2, group_ptr2);
/* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* Validate group_ptr */
MPIR_Group_valid_ptr(group_ptr1, mpi_errno);
MPIR_Group_valid_ptr(group_ptr2, mpi_errno);
/* If either group_ptr is not valid, it will be reset to null */
MPIR_ERRTEST_ARGNEG(n, "n", mpi_errno);
if (group_ptr1) {
/* Check that the rank entries are valid */
int size1 = group_ptr1->size;
int i;
for (i = 0; i < n; i++) {
if ((ranks1[i] < 0 && ranks1[i] != MPI_PROC_NULL) || ranks1[i] >= size1) {
mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
MPI_ERR_RANK,
"**rank", "**rank %d %d",
ranks1[i], size1);
goto fn_fail;
}
}
}
MPIR_ERRTEST_ARGNULL(ranks2, "ranks2", mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPIR_Group_translate_ranks_impl(group_ptr1, n, ranks1, group_ptr2, ranks2);
if (mpi_errno)
MPIR_ERR_POP(mpi_errno);
/* ... end of body of routine ... */
fn_exit:
MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_GROUP_TRANSLATE_RANKS);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
//.........这里部分代码省略.........
示例3: value
/*@
MPI_Comm_free_keyval - Frees an attribute key for communicators
Input Parameters:
. comm_keyval - Frees the integer key value (integer)
Notes:
Key values are global (they can be used with any and all communicators)
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_PERM_KEY
@*/
/* MPI_Comm_free_keyval - free a communicator attribute key previously
 * created with MPI_Comm_create_keyval.  On success *comm_keyval is set
 * to MPI_KEYVAL_INVALID so the handle cannot be reused accidentally. */
int MPI_Comm_free_keyval(int *comm_keyval)
{
MPID_Keyval *keyval_ptr = NULL;
int mpi_errno = MPI_SUCCESS;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_COMM_FREE_KEYVAL);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_COMM_FREE_KEYVAL);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_ARGNULL(comm_keyval, "comm_keyval", mpi_errno);
MPIR_ERRTEST_KEYVAL(*comm_keyval, MPID_COMM, "communicator", mpi_errno);
/* Predefined keyvals are permanent and may not be freed. */
MPIR_ERRTEST_KEYVAL_PERM(*comm_keyval, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
# endif
/* Convert MPI object handles to object pointers */
MPID_Keyval_get_ptr( *comm_keyval, keyval_ptr );
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPID_Keyval_valid_ptr( keyval_ptr, mpi_errno );
if (mpi_errno) goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
MPIR_Comm_free_keyval_impl(*comm_keyval);
*comm_keyval = MPI_KEYVAL_INVALID;
/* ... end of body of routine ... */
/* fn_exit is only a jump target from fn_fail, which itself exists
 * only under HAVE_ERROR_CHECKING; guard the label to avoid an
 * unused-label warning in the non-checking build. */
#ifdef HAVE_ERROR_CHECKING
fn_exit:
#endif
MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_COMM_FREE_KEYVAL);
MPIU_THREAD_CS_EXIT(ALLFUNC,);
return mpi_errno;
/* --BEGIN ERROR HANDLING-- */
# ifdef HAVE_ERROR_CHECKING
fn_fail:
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_comm_free_keyval",
"**mpi_comm_free_keyval %p", comm_keyval);
}
mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
goto fn_exit;
# endif
/* --END ERROR HANDLING-- */
}
示例4: group
/*@
MPI_Type_create_darray - Create a datatype representing a distributed array
Input Parameters:
+ size - size of process group (positive integer)
. rank - rank in process group (nonnegative integer)
. ndims - number of array dimensions as well as process grid dimensions (positive integer)
. array_of_gsizes - number of elements of type oldtype in each dimension of global array (array of positive integers)
. array_of_distribs - distribution of array in each dimension (array of state)
. array_of_dargs - distribution argument in each dimension (array of positive integers)
. array_of_psizes - size of process grid in each dimension (array of positive integers)
. order - array storage order flag (state)
- oldtype - old datatype (handle)
Output Parameters:
. newtype - new datatype (handle)
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_create_darray(int size,
int rank,
int ndims,
const int array_of_gsizes[],
const int array_of_distribs[],
const int array_of_dargs[],
const int array_of_psizes[],
int order,
MPI_Datatype oldtype,
MPI_Datatype *newtype)
{
static const char FCNAME[] = "MPI_Type_create_darray";
int mpi_errno = MPI_SUCCESS, i;
MPI_Datatype new_handle;
int procs, tmp_rank, tmp_size, blklens[3], *coords;
MPI_Aint *st_offsets, orig_extent, disps[3];
MPI_Datatype type_old, type_new = MPI_DATATYPE_NULL, types[3];
# ifdef HAVE_ERROR_CHECKING
MPI_Aint size_with_aint;
MPI_Offset size_with_offset;
# endif
int *ints;
MPID_Datatype *datatype_ptr = NULL;
MPIU_CHKLMEM_DECL(3);
MPID_MPI_STATE_DECL(MPID_STATE_MPI_TYPE_CREATE_DARRAY);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_TYPE_CREATE_DARRAY);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_DATATYPE(oldtype, "datatype", mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
# endif
/* Convert MPI object handles to object pointers */
MPID_Datatype_get_ptr(oldtype, datatype_ptr);
MPID_Datatype_get_extent_macro(oldtype, orig_extent);
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* Check parameters */
MPIR_ERRTEST_ARGNONPOS(size, "size", mpi_errno, MPI_ERR_ARG);
/* use MPI_ERR_RANK class for PE-MPI compatibility */
MPIU_ERR_CHKANDJUMP3((rank < 0 || rank >= size), mpi_errno, MPI_ERR_RANK,
"**argrange", "**argrange %s %d %d", "rank", rank, (size-1));
MPIR_ERRTEST_ARGNONPOS(ndims, "ndims", mpi_errno, MPI_ERR_DIMS);
MPIR_ERRTEST_ARGNULL(array_of_gsizes, "array_of_gsizes", mpi_errno);
MPIR_ERRTEST_ARGNULL(array_of_distribs, "array_of_distribs", mpi_errno);
MPIR_ERRTEST_ARGNULL(array_of_dargs, "array_of_dargs", mpi_errno);
MPIR_ERRTEST_ARGNULL(array_of_psizes, "array_of_psizes", mpi_errno);
if (order != MPI_ORDER_C && order != MPI_ORDER_FORTRAN) {
mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE,
FCNAME,
__LINE__,
MPI_ERR_ARG,
"**arg",
"**arg %s",
"order");
//.........这里部分代码省略.........
示例5: communicator
/*@
MPI_Barrier - Blocks until all processes in the communicator have
reached this routine.
Input Parameters:
. comm - communicator (handle)
Notes:
Blocks the caller until all processes in the communicator have called it;
that is, the call returns at any process only after all members of the
communicator have entered the call.
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
@*/
/* MPI_Barrier - block until every process in comm has entered the call.
 * Validates the communicator handle, then delegates to the device/common
 * implementation MPIR_Barrier_impl; errflag collects collective-wide
 * failure state from the implementation. */
int MPI_Barrier( MPI_Comm comm )
{
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
mpir_errflag_t errflag = MPIR_ERR_NONE; /* aggregated collective error state */
MPID_MPI_STATE_DECL(MPID_STATE_MPI_BARRIER);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_BARRIER);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* Validate communicator */
MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
if (mpi_errno) goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPIR_Barrier_impl(comm_ptr, &errflag);
if (mpi_errno) goto fn_fail;
/* ... end of body of routine ... */
fn_exit:
MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_BARRIER);
MPIU_THREAD_CS_EXIT(ALLFUNC,);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
# ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_barrier", "**mpi_barrier %C", comm);
}
# endif
mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例6: buffer
//.........这里部分代码省略.........
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPID_Datatype *sendtype_ptr=NULL, *recvtype_ptr=NULL;
int i, comm_size;
int check_send = (comm_ptr->comm_kind == MPID_INTRACOMM && sendbuf != MPI_IN_PLACE);
MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
if (comm_ptr->comm_kind == MPID_INTRACOMM) {
comm_size = comm_ptr->local_size;
if (sendbuf != MPI_IN_PLACE && sendtype == recvtype && sendcounts == recvcounts)
MPIR_ERRTEST_ALIAS_COLL(sendbuf, recvbuf, mpi_errno);
} else
comm_size = comm_ptr->remote_size;
if (comm_ptr->comm_kind == MPID_INTERCOMM && sendbuf == MPI_IN_PLACE) {
MPIR_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**sendbuf_inplace");
}
for (i=0; i<comm_size; i++) {
if (check_send) {
MPIR_ERRTEST_COUNT(sendcounts[i], mpi_errno);
MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
}
MPIR_ERRTEST_COUNT(recvcounts[i], mpi_errno);
MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
}
if (check_send && HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
MPID_Datatype_committed_ptr( sendtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
MPID_Datatype_committed_ptr( recvtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
for (i=0; i<comm_size && check_send; i++) {
if (sendcounts[i] > 0) {
MPIR_ERRTEST_USERBUFFER(sendbuf,sendcounts[i],sendtype,mpi_errno);
}
}
for (i=0; i<comm_size; i++) {
if (recvcounts[i] > 0) {
MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, recvcounts[i], mpi_errno);
MPIR_ERRTEST_USERBUFFER(recvbuf,recvcounts[i],recvtype,mpi_errno);
break;
}
}
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPIR_Alltoallv_impl(sendbuf, sendcounts, sdispls,
sendtype, recvbuf, recvcounts,
rdispls, recvtype, comm_ptr, &errflag);
if (mpi_errno) goto fn_fail;
/* ... end of body of routine ... */
fn_exit:
MPID_MPI_COLL_FUNC_EXIT(MPID_STATE_MPI_ALLTOALLV);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
# ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_alltoallv",
"**mpi_alltoallv %p %p %p %D %p %p %p %D %C", sendbuf, sendcounts, sdispls, sendtype,
recvbuf, recvcounts, rdispls, recvtype, comm);
}
# endif
mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例7: MPID_Ssend
int MPID_Ssend(const void * buf, MPI_Aint count, MPI_Datatype datatype, int rank, int tag, MPID_Comm * comm, int context_offset,
MPID_Request ** request)
{
MPIDI_msg_sz_t data_sz;
int dt_contig;
MPI_Aint dt_true_lb;
MPID_Datatype * dt_ptr;
MPID_Request * sreq = NULL;
MPIDI_VC_t * vc;
#if defined(MPID_USE_SEQUENCE_NUMBERS)
MPID_Seqnum_t seqnum;
#endif
int eager_threshold = -1;
int mpi_errno = MPI_SUCCESS;
MPIDI_STATE_DECL(MPID_STATE_MPID_SSEND);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_SSEND);
MPIU_DBG_MSG_FMT(CH3_OTHER,VERBOSE,(MPIU_DBG_FDEST,
"rank=%d, tag=%d, context=%d",
rank, tag, comm->context_id + context_offset));
/* Check to make sure the communicator hasn't already been revoked */
if (comm->revoked &&
MPIR_AGREE_TAG != MPIR_TAG_MASK_ERROR_BITS(tag & ~MPIR_Process.tagged_coll_mask) &&
MPIR_SHRINK_TAG != MPIR_TAG_MASK_ERROR_BITS(tag & ~MPIR_Process.tagged_coll_mask)) {
MPIR_ERR_SETANDJUMP(mpi_errno,MPIX_ERR_REVOKED,"**revoked");
}
if (rank == comm->rank && comm->comm_kind != MPID_INTERCOMM)
{
mpi_errno = MPIDI_Isend_self(buf, count, datatype, rank, tag, comm,
context_offset, MPIDI_REQUEST_TYPE_SSEND,
&sreq);
/* In the single threaded case, sending to yourself will cause
deadlock. Note that in the runtime-thread case, this check
will not be made (long-term FIXME) */
# ifndef MPICH_IS_THREADED
{
/* --BEGIN ERROR HANDLING-- */
if (sreq != NULL && MPID_cc_get(sreq->cc) != 0)
{
mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**dev|selfsenddeadlock", 0);
goto fn_exit;
}
/* --END ERROR HANDLING-- */
}
# endif
goto fn_exit;
}
if (rank == MPI_PROC_NULL)
{
goto fn_exit;
}
MPIDI_Comm_get_vc_set_active(comm, rank, &vc);
#ifdef ENABLE_COMM_OVERRIDES
if (vc->comm_ops && vc->comm_ops->ssend)
{
mpi_errno = vc->comm_ops->ssend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
goto fn_exit;
}
#endif
MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SSEND);
if (data_sz == 0)
{
mpi_errno = MPIDI_CH3_EagerSyncZero( &sreq, rank, tag, comm,
context_offset );
goto fn_exit;
}
MPIDI_CH3_GET_EAGER_THRESHOLD(&eager_threshold, comm, vc);
if (data_sz + sizeof(MPIDI_CH3_Pkt_eager_sync_send_t) <= eager_threshold)
{
mpi_errno = MPIDI_CH3_EagerSyncNoncontigSend( &sreq, buf, count,
datatype, data_sz,
dt_contig, dt_true_lb,
rank, tag, comm,
context_offset );
}
else
{
/* Note that the sreq was created above */
mpi_errno = vc->rndvSend_fn( &sreq, buf, count, datatype, dt_contig,
data_sz, dt_true_lb, rank, tag, comm,
context_offset );
/* Note that we don't increase the ref cound on the datatype
because this is a blocking call, and the calling routine
must wait until sreq completes */
}
//.........这里部分代码省略.........
示例8: MPIDU_Datatype_set_contents
/*@
MPIDU_Datatype_set_contents - store contents information for use in
MPI_Type_get_contents.
Returns MPI_SUCCESS on success, MPI error code on error.
@*/
/* Record the (combiner, ints, aints, types) construction recipe of a
 * datatype in a single heap blob so MPI_Type_get_contents can replay it.
 * Layout of the blob: header struct, then the types array, then the ints
 * array, then the aints array; each region except the last is padded to
 * align_sz.  Returns MPI_SUCCESS, or an "**nomem" error code if the
 * allocation fails. */
int MPIDU_Datatype_set_contents(MPIDU_Datatype *new_dtp,
int combiner,
int nr_ints,
int nr_aints,
int nr_types,
int array_of_ints[],
const MPI_Aint array_of_aints[],
const MPI_Datatype array_of_types[])
{
int struct_sz, types_sz, ints_sz, aints_sz;
int total_sz, align_sz, pad, idx, mpi_errno;
MPIDU_Datatype_contents *contents;
MPIDU_Datatype *referenced_dtp;
char *cursor;
align_sz = 8;
#ifdef HAVE_MAX_STRUCT_ALIGNMENT
/* Never pad to more than the platform's actual struct alignment. */
if (align_sz > HAVE_MAX_STRUCT_ALIGNMENT) {
align_sz = HAVE_MAX_STRUCT_ALIGNMENT;
}
#endif
struct_sz = sizeof(MPIDU_Datatype_contents);
types_sz = nr_types * sizeof(MPI_Datatype);
ints_sz = nr_ints * sizeof(int);
aints_sz = nr_aints * sizeof(MPI_Aint);
/* Round the struct, types, and ints regions up to a multiple of
 * align_sz so that each following region starts aligned.  The
 * trailing aints region needs no padding. */
pad = struct_sz % align_sz;
if (pad != 0)
struct_sz += align_sz - pad;
pad = types_sz % align_sz;
if (pad != 0)
types_sz += align_sz - pad;
pad = ints_sz % align_sz;
if (pad != 0)
ints_sz += align_sz - pad;
total_sz = struct_sz + types_sz + ints_sz + aints_sz;
contents = (MPIDU_Datatype_contents *) MPL_malloc(total_sz);
/* --BEGIN ERROR HANDLING-- */
if (contents == NULL) {
mpi_errno = MPIR_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE,
"MPIDU_Datatype_set_contents",
__LINE__,
MPI_ERR_OTHER,
"**nomem",
0);
return mpi_errno;
}
/* --END ERROR HANDLING-- */
contents->combiner = combiner;
contents->nr_ints = nr_ints;
contents->nr_aints = nr_aints;
contents->nr_types = nr_types;
/* Copy the arrays directly behind the header: types, ints, aints. */
cursor = (char *) contents + struct_sz;
if (nr_types > 0) {
/* Fortran90 combiner types do not have a "base" type */
MPIR_Memcpy(cursor, array_of_types, nr_types * sizeof(MPI_Datatype));
}
cursor = (char *) contents + struct_sz + types_sz;
if (nr_ints > 0) {
MPIR_Memcpy(cursor, array_of_ints, nr_ints * sizeof(int));
}
cursor = (char *) contents + struct_sz + types_sz + ints_sz;
if (nr_aints > 0) {
MPIR_Memcpy(cursor, array_of_aints, nr_aints * sizeof(MPI_Aint));
}
new_dtp->contents = contents;
/* Keep every derived (non-builtin) input type alive for as long as
 * this contents record references it. */
for (idx = 0; idx < nr_types; idx++) {
if (HANDLE_GET_KIND(array_of_types[idx]) != HANDLE_KIND_BUILTIN) {
MPIDU_Datatype_get_ptr(array_of_types[idx], referenced_dtp);
MPIDU_Datatype_add_ref(referenced_dtp);
}
}
return MPI_SUCCESS;
}
示例9: structure
//.........这里部分代码省略.........
.N SignalSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
/* MPI_Graph_get - copy the index and edges arrays of a graph topology
 * attached to comm into caller-supplied buffers.  Fails with
 * MPI_ERR_TOPOLOGY if comm has no graph topology, and with MPI_ERR_ARG
 * if either output array is too small. */
int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int indx[], int edges[])
{
int mpi_errno = MPI_SUCCESS;
MPIR_Comm *comm_ptr = NULL;
MPIR_Topology *topo_ptr;
int i, n, *vals;
MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_GRAPH_GET);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_GRAPH_GET);
/* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif
/* Convert MPI object handles to object pointers */
MPIR_Comm_get_ptr(comm, comm_ptr);
/* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* Validate comm_ptr */
MPIR_Comm_valid_ptr(comm_ptr, mpi_errno, TRUE);
if (mpi_errno)
goto fn_fail;
/* If comm_ptr is not valid, it will be reset to null */
MPIR_ERRTEST_ARGNULL(edges, "edges", mpi_errno);
MPIR_ERRTEST_ARGNULL(indx, "indx", mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
topo_ptr = MPIR_Topology_get(comm_ptr);
/* The communicator must carry a graph (not cartesian, not absent) topology. */
MPIR_ERR_CHKANDJUMP((!topo_ptr ||
topo_ptr->kind != MPI_GRAPH), mpi_errno, MPI_ERR_TOPOLOGY,
"**notgraphtopo");
/* Caller-supplied capacities must cover the stored arrays. */
MPIR_ERR_CHKANDJUMP3((topo_ptr->topo.graph.nnodes > maxindex), mpi_errno, MPI_ERR_ARG,
"**argtoosmall", "**argtoosmall %s %d %d", "maxindex", maxindex,
topo_ptr->topo.graph.nnodes);
MPIR_ERR_CHKANDJUMP3((topo_ptr->topo.graph.nedges > maxedges), mpi_errno, MPI_ERR_ARG,
"**argtoosmall", "**argtoosmall %s %d %d", "maxedges", maxedges,
topo_ptr->topo.graph.nedges);
/* Get index */
n = topo_ptr->topo.graph.nnodes;
vals = topo_ptr->topo.graph.index;
for (i = 0; i < n; i++)
*indx++ = *vals++;
/* Get edges */
n = topo_ptr->topo.graph.nedges;
vals = topo_ptr->topo.graph.edges;
for (i = 0; i < n; i++)
*edges++ = *vals++;
/* ... end of body of routine ... */
fn_exit:
MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_GRAPH_GET);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
{
mpi_errno =
MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_graph_get", "**mpi_graph_get %C %d %d %p %p", comm,
maxindex, maxedges, indx, edges);
}
#endif
mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例10: buffer
//.........这里部分代码省略.........
.N MPI_ERR_COMM
.N MPI_ERR_EXHAUSTED
.seealso: MPI_Start, MPI_Startall, MPI_Request_free
@*/
/* MPI_Recv_init - create a persistent receive request without starting
 * it.  The request is activated later with MPI_Start/MPI_Startall and
 * released with MPI_Request_free.  On success the new request handle is
 * published into *request. */
int MPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int source,
int tag, MPI_Comm comm, MPI_Request * request)
{
int mpi_errno = MPI_SUCCESS;
MPIR_Comm *comm_ptr = NULL;
MPIR_Request *request_ptr = NULL;
MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_RECV_INIT);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPIR_FUNC_TERSE_PT2PT_ENTER(MPID_STATE_MPI_RECV_INIT);
/* Validate handle parameters needing to be converted */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPIR_Comm_get_ptr(comm, comm_ptr);
/* Validate parameters if error checking is enabled */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_Comm_valid_ptr(comm_ptr, mpi_errno, FALSE);
if (mpi_errno)
goto fn_fail;
MPIR_ERRTEST_COUNT(count, mpi_errno);
/* source may be MPI_ANY_SOURCE/MPI_PROC_NULL; the macro allows those */
MPIR_ERRTEST_RECV_RANK(comm_ptr, source, mpi_errno);
MPIR_ERRTEST_RECV_TAG(tag, mpi_errno);
MPIR_ERRTEST_ARGNULL(request, "request", mpi_errno);
/* Validate datatype handle */
MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
/* Validate datatype object */
if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
MPIR_Datatype *datatype_ptr = NULL;
MPIR_Datatype_get_ptr(datatype, datatype_ptr);
MPIR_Datatype_valid_ptr(datatype_ptr, mpi_errno);
if (mpi_errno)
goto fn_fail;
MPIR_Datatype_committed_ptr(datatype_ptr, mpi_errno);
if (mpi_errno)
goto fn_fail;
}
/* Validate buffer */
MPIR_ERRTEST_USERBUFFER(buf, count, datatype, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPID_Recv_init(buf, count, datatype, source, tag, comm_ptr,
MPIR_CONTEXT_INTRA_PT2PT, &request_ptr);
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
/* return the handle of the request to the user */
MPIR_OBJ_PUBLISH_HANDLE(*request, request_ptr->handle);
/* ... end of body of routine ... */
fn_exit:
MPIR_FUNC_TERSE_PT2PT_EXIT(MPID_STATE_MPI_RECV_INIT);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
{
mpi_errno =
MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_recv_init", "**mpi_recv_init %p %d %D %i %t %C %p", buf,
count, datatype, source, tag, comm, request);
}
#endif
mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例11: object
/*@
MPI_Win_get_info - Returns a new info object containing the hints of the window
associated with win.
The current setting of all hints actually used by the system related to this
window is returned in info_used. If no such hints exist, a handle to a newly
created info object is returned that contains no key/value pair. The user is
responsible for freeing info_used via 'MPI_Info_free'.
Input Parameters:
. win - window object (handle)
Output Parameters:
. info_used - new info argument (handle)
Notes:
The info object returned in info_used will contain all hints currently active
for this window. This set of hints may be greater or smaller than the set of
hints specified when the window was created, as the system may not recognize
some hints set by the user, and may recognize other hints that the user has not
set.
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_WIN
.N MPI_ERR_INFO
.N MPI_ERR_OTHER
.seealso: MPI_Win_set_info
@*/
int MPI_Win_get_info(MPI_Win win, MPI_Info * info_used)
{
int mpi_errno = MPI_SUCCESS;
MPIR_Win *win_ptr = NULL;
MPIR_Info *info_ptr = NULL;
MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_WIN_GET_INFO);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPIR_FUNC_TERSE_RMA_ENTER(MPID_STATE_MPI_WIN_GET_INFO);
/* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_ARGNULL(info_used, "info", mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPIR_Win_get_ptr(win, win_ptr);
/* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* Validate pointers */
MPIR_Win_valid_ptr(win_ptr, mpi_errno);
if (mpi_errno)
goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPID_Win_get_info(win_ptr, &info_ptr);
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
*info_used = info_ptr->handle;
/* ... end of body of routine ... */
fn_exit:
MPIR_FUNC_TERSE_RMA_EXIT(MPID_STATE_MPI_WIN_GET_INFO);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
{
mpi_errno =
MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_win_get_info", "**mpi_win_get_info %W %p", win, info_used);
}
//.........这里部分代码省略.........
示例12: buffer
//.........这里部分代码省略.........
MPID_Comm *comm_ptr = NULL;
MPID_Request * request_ptr = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_SSEND);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_PT2PT_FUNC_ENTER_FRONT(MPID_STATE_MPI_SSEND);
/* Validate handle parameters needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
if (mpi_errno) goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate parameters if error checking is enabled */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
if (mpi_errno) goto fn_fail;
MPIR_ERRTEST_COUNT(count, mpi_errno);
MPIR_ERRTEST_SEND_RANK(comm_ptr, dest, mpi_errno);
MPIR_ERRTEST_SEND_TAG(tag, mpi_errno);
if (mpi_errno) goto fn_fail;
/* Validate datatype handle */
MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
/* Validate datatype object */
if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN)
{
MPID_Datatype *datatype_ptr = NULL;
MPID_Datatype_get_ptr(datatype, datatype_ptr);
MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno);
if (mpi_errno) goto fn_fail;
}
/* Validate buffer */
MPIR_ERRTEST_USERBUFFER(buf,count,datatype,mpi_errno);
if (mpi_errno) goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPID_Ssend(buf, count, datatype, dest, tag, comm_ptr,
MPID_CONTEXT_INTRA_PT2PT, &request_ptr);
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
if (request_ptr == NULL)
{
goto fn_exit;
}
/* If a request was returned, then we need to block until the request
is complete */
mpi_errno = MPIR_Progress_wait_request(request_ptr);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
mpi_errno = request_ptr->status.MPI_ERROR;
MPID_Request_release(request_ptr);
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
/* ... end of body of routine ... */
fn_exit:
MPID_MPI_PT2PT_FUNC_EXIT(MPID_STATE_MPI_SSEND);
MPIU_THREAD_CS_EXIT(ALLFUNC,);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
# ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_ssend",
"**mpi_ssend %p %d %D %i %t %C", buf, count, datatype, dest, tag, comm);
}
# endif
mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例13: buffer
//.........这里部分代码省略.........
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
}
for (i = 0; i < comm_size; i++) {
if (recvcounts[i] > 0) {
MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, recvcounts[i], mpi_errno);
MPIR_ERRTEST_USERBUFFER(recvbuf, recvcounts[i], recvtype, mpi_errno);
break;
}
}
/* catch common aliasing cases */
if (sendbuf != MPI_IN_PLACE && sendtype == recvtype &&
recvcounts[comm_ptr->rank] != 0 && sendcount != 0) {
int recvtype_size;
MPIR_Datatype_get_size_macro(recvtype, recvtype_size);
MPIR_ERRTEST_ALIAS_COLL(sendbuf,
(char *) recvbuf +
displs[comm_ptr->rank] * recvtype_size, mpi_errno);
}
} else
MPIR_ERRTEST_SENDBUF_INPLACE(sendbuf, sendcount, mpi_errno);
}
if (comm_ptr->comm_kind == MPIR_COMM_KIND__INTERCOMM) {
MPIR_ERRTEST_INTER_ROOT(comm_ptr, root, mpi_errno);
if (root == MPI_ROOT) {
comm_size = comm_ptr->remote_size;
for (i = 0; i < comm_size; i++) {
MPIR_ERRTEST_COUNT(recvcounts[i], mpi_errno);
MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
}
if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
MPIR_Datatype_get_ptr(recvtype, recvtype_ptr);
MPIR_Datatype_valid_ptr(recvtype_ptr, mpi_errno);
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
MPIR_Datatype_committed_ptr(recvtype_ptr, mpi_errno);
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
}
for (i = 0; i < comm_size; i++) {
if (recvcounts[i] > 0) {
MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, recvcounts[i], mpi_errno);
MPIR_ERRTEST_USERBUFFER(recvbuf, recvcounts[i], recvtype, mpi_errno);
break;
}
}
} else if (root != MPI_PROC_NULL) {
MPIR_ERRTEST_COUNT(sendcount, mpi_errno);
MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
MPIR_Datatype_get_ptr(sendtype, sendtype_ptr);
MPIR_Datatype_valid_ptr(sendtype_ptr, mpi_errno);
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
MPIR_Datatype_committed_ptr(sendtype_ptr, mpi_errno);
if (mpi_errno != MPI_SUCCESS)
goto fn_fail;
}
MPIR_ERRTEST_SENDBUF_INPLACE(sendbuf, sendcount, mpi_errno);
MPIR_ERRTEST_USERBUFFER(sendbuf, sendcount, sendtype, mpi_errno);
}
}
}
MPID_END_ERROR_CHECKS;
}
#endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPIR_Gatherv(sendbuf, sendcount, sendtype,
recvbuf, recvcounts, displs, recvtype, root, comm_ptr, &errflag);
if (mpi_errno)
goto fn_fail;
/* ... end of body of routine ... */
fn_exit:
MPIR_FUNC_TERSE_COLL_EXIT(MPID_STATE_MPI_GATHERV);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
{
mpi_errno =
MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, __func__, __LINE__, MPI_ERR_OTHER,
"**mpi_gatherv", "**mpi_gatherv %p %d %D %p %p %p %D %d %C",
sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
recvtype, root, comm);
}
#endif
mpi_errno = MPIR_Err_return_comm(comm_ptr, __func__, mpi_errno);
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例14: edge
/*@
MPI_Neighbor_alltoall - In this function, each process i receives data items
from each process j if an edge (j,i) exists in the topology graph or Cartesian
topology. Similarly, each process i sends data items to all processes j where an
edge (i,j) exists. This call is more general than MPI_NEIGHBOR_ALLGATHER in that
different data items can be sent to each neighbor. The k-th block in send buffer
is sent to the k-th neighboring process and the l-th block in the receive buffer
is received from the l-th neighbor.
Input Parameters:
+ sendbuf - starting address of the send buffer (choice)
. sendcount - number of elements sent to each neighbor (non-negative integer)
. sendtype - data type of send buffer elements (handle)
. recvcount - number of elements received from each neighbor (non-negative integer)
. recvtype - data type of receive buffer elements (handle)
- comm - communicator (handle)
Output Parameters:
. recvbuf - starting address of the receive buffer (choice)
.N ThreadSafe
.N Fortran
.N Errors
@*/
int MPI_Neighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
int mpi_errno = MPI_SUCCESS;
MPIR_Comm *comm_ptr = NULL;
MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_NEIGHBOR_ALLTOALL);
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_NEIGHBOR_ALLTOALL);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS
{
MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
MPIR_ERRTEST_COMM(comm, mpi_errno);
/* TODO more checks may be appropriate */
}
MPID_END_ERROR_CHECKS
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPIR_Comm_get_ptr(comm, comm_ptr);
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS
{
if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
MPIR_Datatype *sendtype_ptr = NULL;
MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
MPIR_Datatype_valid_ptr(sendtype_ptr, mpi_errno);
MPID_Datatype_committed_ptr(sendtype_ptr, mpi_errno);
}
if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
MPIR_Datatype *recvtype_ptr = NULL;
MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
MPIR_Datatype_valid_ptr(recvtype_ptr, mpi_errno);
MPID_Datatype_committed_ptr(recvtype_ptr, mpi_errno);
}
MPIR_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
/* TODO more checks may be appropriate (counts, in_place, buffer aliasing, etc) */
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
MPID_END_ERROR_CHECKS
}
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
mpi_errno = MPID_Neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm_ptr);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
/* ... end of body of routine ... */
fn_exit:
MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_NEIGHBOR_ALLTOALL);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
# ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_neighbor_alltoall", "**mpi_neighbor_alltoall %p %d %D %p %d %D %C", sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
}
//.........这里部分代码省略.........
示例15: MPIDI_CH3_Connect_to_root
/* MPIDI_CH3_Connect_to_root - create and connect a new virtual connection
 * (VC) to the root process listening on 'port_name' (dynamic process
 * support, e.g. MPI_Comm_connect).
 *
 * port_name - port string produced by the remote MPI_Open_port; must carry
 *             a host-description key and a port tag.
 * new_vc    - out: on success, the newly created, connected VC; on any
 *             failure it is left NULL.
 *
 * Returns MPI_SUCCESS or an MPI error code. */
int MPIDI_CH3_Connect_to_root(const char* port_name, MPIDI_VC_t** new_vc)
{
    int mpi_errno = MPI_SUCCESS;
    int str_errno;
    char ifname[MAX_HOST_DESCRIPTION_LEN];
    MPIDI_VC_t *vc = NULL;   /* NULL until successfully allocated */
    MPIDI_CH3_Pkt_cm_establish_t pkt;
    MPID_Request * sreq;
    int seqnum;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_CONNECT_TO_ROOT);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_CONNECT_TO_ROOT);
    *new_vc = NULL;
    /* Dynamic process management support is required.  Route the error
       through fn_fail (instead of returning directly, as before) so the
       FUNC_ENTER above is always balanced by the FUNC_EXIT below. */
    if (!MPIDI_CH3I_Process.has_dpm) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME,
                                         __LINE__, MPI_ERR_OTHER, "**notimpl", 0);
        goto fn_fail;
    }
    /* Extract the listener's host description from the port string. */
    str_errno = MPIU_Str_get_string_arg(port_name,
                                        MPIDI_CH3I_HOST_DESCRIPTION_KEY,
                                        ifname, MAX_HOST_DESCRIPTION_LEN);
    if (str_errno != MPIU_STR_SUCCESS) {
        /* --BEGIN ERROR HANDLING-- */
        if (str_errno == MPIU_STR_FAIL) {
            MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**argstr_missinghost");
        }
        else {
            /* MPIU_STR_TRUNCATED or MPIU_STR_NONEM */
            MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
        }
        /* --END ERROR HANDLING-- */
    }
    vc = MPIU_Malloc(sizeof(MPIDI_VC_t));
    if (!vc) {
        MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**nomem");
    }
    MPIDI_VC_Init(vc, NULL, 0);
    mpi_errno = MPIDI_CH3I_CM_Connect_raw_vc(vc, ifname);
    if (mpi_errno) {
        MPIU_ERR_POP(mpi_errno);
    }
    /* Spin the progress engine until the raw connection reaches IDLE. */
    while (vc->ch.state != MPIDI_CH3I_VC_STATE_IDLE) {
        mpi_errno = MPID_Progress_test();
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS) {
            MPIU_ERR_POP(mpi_errno);
        }
        /* --END ERROR HANDLING-- */
    }
    /* The raw connection is up: send a cm_establish packet so the remote
       side can match this VC.
       FIXME: vc->mrail.remote_vc_addr is used to find remote vc;
       a more elegant way is needed. */
    MPIDI_Pkt_init(&pkt, MPIDI_CH3_PKT_CM_ESTABLISH);
    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
    MPIDI_Pkt_set_seqnum(&pkt, seqnum);
    pkt.vc_addr = vc->mrail.remote_vc_addr;
    /* MPIDI_GetTagFromPort returns an MPI error code; the old code compared
       it against MPIU_STR_SUCCESS (also 0) -- same behavior, right constant. */
    mpi_errno = MPIDI_GetTagFromPort(port_name, &pkt.port_name_tag);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_port_name_tag");
    }
    mpi_errno = MPIDI_CH3_iStartMsg(vc, &pkt, sizeof(pkt), &sreq);
    if (mpi_errno != MPI_SUCCESS)
    {
        MPIU_ERR_SETANDJUMP1(mpi_errno,MPI_ERR_OTHER,"**fail", "**fail %s",
                             "Failed to send cm establish message");
    }
    /* If the send did not complete immediately, propagate any error the
       request carries, then release our reference either way. */
    if (sreq != NULL)
    {
        if (sreq->status.MPI_ERROR != MPI_SUCCESS)
        {
            mpi_errno = MPIR_Err_create_code(sreq->status.MPI_ERROR,
                                             MPIR_ERR_FATAL, FCNAME, __LINE__,
                                             MPI_ERR_OTHER,
                                             "**fail", 0);
            MPID_Request_release(sreq);
            goto fn_fail;
        }
        MPID_Request_release(sreq);
    }
    *new_vc = vc;
    /* fn_fail doubles as the common exit: on success *new_vc was set above;
       on failure it is still NULL.
       NOTE(review): vc is not freed on failure paths after allocation.
       Whether it is safe to free once MPIDI_CH3I_CM_Connect_raw_vc has
       partially run is not visible from here -- leaving as-is; confirm
       ownership before adding a free. */
fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_CONNECT_TO_ROOT);
    return mpi_errno;
}