本文整理汇总了C++中MPID_Comm_get_ptr函数的典型用法代码示例。如果您正苦于以下问题:C++ MPID_Comm_get_ptr函数的具体用法?C++ MPID_Comm_get_ptr怎么用?C++ MPID_Comm_get_ptr使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MPID_Comm_get_ptr函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: MPIDO_Barrier_gi
int
MPIDO_Barrier_gi(MPID_Comm * comm)
{
  /* Execute a global barrier over MPI_COMM_WORLD via the DCMF
   * global-barrier protocol; returns the DCMF result code. */
  int rc;
  MPID_Comm * comm_world;
  DCMF_Callback_t callback = { barrier_cb_done,
                               (void *) &mpid_globalbarrier_active };

  MPID_Comm_get_ptr(MPI_COMM_WORLD, comm_world);

  /* Mark the global barrier as in flight before starting it; the
   * completion callback clears this flag. */
  mpid_globalbarrier_active = 1;

  if (!mpid_globalbarrier_restart)
  {
    /* First use: start a fresh global barrier and remember that the
     * persistent request can be restarted from now on. */
    mpid_globalbarrier_restart = 1;
    rc = DCMF_GlobalBarrier(&MPIDI_Protocols.globalbarrier,
                            &mpid_globalbarrier_request, callback);
  }
  else
  {
    /* Subsequent uses: restart the persistent barrier request. */
    rc = DCMF_Restart (&mpid_globalbarrier_request);
  }

  /* Spin the progress engine until the callback clears the flag. */
  if (rc == DCMF_SUCCESS)
    MPID_PROGRESS_WAIT_WHILE(* (int *) callback.clientdata);

  return rc;
}
示例2: MPIC_Send
int MPIC_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag,
              MPI_Comm comm)
{
    /* Collective-internal blocking send: issues MPID_Send on the
     * collective context of 'comm' and waits for local completion. */
    int mpi_errno;
    int context_id;
    MPID_Request *req = NULL;
    MPID_Comm *comm_ptr = NULL;
    MPIDI_STATE_DECL(MPID_STATE_MPIC_SEND);

    MPIDI_PT2PT_FUNC_ENTER_FRONT(MPID_STATE_MPIC_SEND);

    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Select the collective context id matching the communicator kind. */
    if (comm_ptr->comm_kind == MPID_INTRACOMM)
        context_id = MPID_CONTEXT_INTRA_COLL;
    else
        context_id = MPID_CONTEXT_INTER_COLL;

    mpi_errno = MPID_Send(buf, count, datatype, dest, tag, comm_ptr,
                          context_id, &req);
    if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }

    /* A NULL request means the send already completed locally. */
    if (req != NULL) {
        mpi_errno = MPIC_Wait(req);
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
        MPID_Request_release(req);
        req = NULL;
    }

 fn_exit:
    MPIDI_PT2PT_FUNC_EXIT(MPID_STATE_MPIC_SEND);
    return mpi_errno;
 fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    if (req != NULL) {
        MPID_Request_release(req);
    }
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
示例3: MPIC_Sendrecv_replace
int MPIC_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype,
int dest, int sendtag,
int source, int recvtag,
MPI_Comm comm, MPI_Status *status)
{
int mpi_errno = MPI_SUCCESS;
MPIR_Context_id_t context_id_offset;
MPID_Request *sreq;
MPID_Request *rreq;
void *tmpbuf = NULL;
int tmpbuf_size = 0;
int tmpbuf_count = 0;
MPID_Comm *comm_ptr;
MPIU_CHKLMEM_DECL(1);
MPIDI_STATE_DECL(MPID_STATE_MPIC_SENDRECV_REPLACE);
#ifdef MPID_LOG_ARROWS
/* The logging macros log sendcount and recvcount */
int sendcount = count, recvcount = count;
#endif
MPIDI_PT2PT_FUNC_ENTER_BOTH(MPID_STATE_MPIC_SENDRECV_REPLACE);
MPID_Comm_get_ptr( comm, comm_ptr );
context_id_offset = (comm_ptr->comm_kind == MPID_INTRACOMM) ?
MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL;
if (count > 0 && dest != MPI_PROC_NULL)
{
MPIR_Pack_size_impl(count, datatype, &tmpbuf_size);
MPIU_CHKLMEM_MALLOC(tmpbuf, void *, tmpbuf_size, mpi_errno, "temporary send buffer");
mpi_errno = MPIR_Pack_impl(buf, count, datatype, tmpbuf, tmpbuf_size, &tmpbuf_count);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
}
示例4: MPIDI_CH3U_Win_allocate_shared
int MPIDI_CH3U_Win_allocate_shared(MPI_Aint size, int disp_unit, MPID_Info *info, MPID_Comm *comm_ptr,
void **base_ptr, MPID_Win **win_ptr)
{
/* Shared-memory window allocation for the baseline CH3 channel.
 * Allocates the window via MPIDI_CH3U_Win_allocate, records the single
 * base address, and installs the shared-memory free hook.
 * NOTE(review): the error-checking block below rejects any communicator
 * whose group is not identical to MPI_COMM_SELF's. */
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_self_ptr = NULL;
MPID_Group *group_comm, *group_self;
int result;
MPIU_CHKPMEM_DECL(1);
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_WIN_ALLOCATE_SHARED);
MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_WIN_ALLOCATE_SHARED);
#ifdef HAVE_ERROR_CHECKING
/* The baseline CH3 implementation only works with MPI_COMM_SELF */
MPID_Comm_get_ptr( MPI_COMM_SELF, comm_self_ptr );
/* Compare comm_ptr's group against MPI_COMM_SELF's group; both
 * temporary groups are freed again before the comparison result is
 * acted on. */
mpi_errno = MPIR_Comm_group_impl(comm_ptr, &group_comm);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
mpi_errno = MPIR_Comm_group_impl(comm_self_ptr, &group_self);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
mpi_errno = MPIR_Group_compare_impl(group_comm, group_self, &result);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
mpi_errno = MPIR_Group_free_impl(group_comm);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
mpi_errno = MPIR_Group_free_impl(group_self);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
if (result != MPI_IDENT) {
MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_RMA_SHARED, "**ch3|win_shared_comm");
}
#endif
mpi_errno = MPIDI_CH3U_Win_allocate(size, disp_unit, info, comm_ptr,
base_ptr, win_ptr);
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
/* Only one process in the window (comm size 1), so the base-address
 * table has exactly one entry. */
MPIU_CHKPMEM_MALLOC((*win_ptr)->shm_base_addrs, void **,
1 /* comm_size */ * sizeof(void *),
mpi_errno, "(*win_ptr)->shm_base_addrs");
(*win_ptr)->shm_base_addrs[0] = *base_ptr;
/* Register the shared memory window free function, which will free the
memory allocated here. */
(*win_ptr)->RMAFns.Win_free = MPIDI_SHM_Win_free;
fn_exit:
MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_WIN_ALLOCATE_SHARED);
return mpi_errno;
/* --BEGIN ERROR HANDLING-- */
fn_fail:
/* Undo the CHKPMEM allocation made above before returning the error. */
MPIU_CHKPMEM_REAP();
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例5: MPIC_Probe
int MPIC_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status)
{
    /* Collective-internal blocking probe: waits for a matching message
     * on the collective context of 'comm' and fills in 'status'. */
    int mpi_errno = MPI_SUCCESS;
    int ctx;
    MPID_Comm *comm_ptr;

    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Select the collective context id matching the communicator kind. */
    if (comm_ptr->comm_kind == MPID_INTRACOMM)
        ctx = MPID_CONTEXT_INTRA_COLL;
    else
        ctx = MPID_CONTEXT_INTER_COLL;

    mpi_errno = MPID_Probe(source, tag, comm_ptr, ctx, status);
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
示例6: MPIC_Recv
int MPIC_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
              MPI_Comm comm, MPI_Status *status)
{
    /* Collective-internal blocking receive: issues MPID_Recv on the
     * collective context of 'comm', waits for completion, and reports
     * the receive status / error. */
    int mpi_errno;
    int context_id;
    MPID_Request *req = NULL;
    MPID_Comm *comm_ptr = NULL;
    MPIDI_STATE_DECL(MPID_STATE_MPIC_RECV);

    MPIDI_PT2PT_FUNC_ENTER_BACK(MPID_STATE_MPIC_RECV);

    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Select the collective context id matching the communicator kind. */
    if (comm_ptr->comm_kind == MPID_INTRACOMM)
        context_id = MPID_CONTEXT_INTRA_COLL;
    else
        context_id = MPID_CONTEXT_INTER_COLL;

    mpi_errno = MPID_Recv(buf, count, datatype, source, tag, comm_ptr,
                          context_id, status, &req);
    if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }

    /* A NULL request means the receive already completed. */
    if (req != NULL) {
        mpi_errno = MPIC_Wait(req);
        if (mpi_errno != MPI_SUCCESS) { MPIU_ERR_POP(mpi_errno); }

        /* Propagate the completed request's status to the caller and
         * surface any per-message error it recorded. */
        if (status != MPI_STATUS_IGNORE) {
            *status = req->status;
        }
        mpi_errno = req->status.MPI_ERROR;

        MPID_Request_release(req);
        req = NULL;
    }

 fn_exit:
    MPIDI_PT2PT_FUNC_EXIT_BACK(MPID_STATE_MPIC_RECV);
    return mpi_errno;
 fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    if (req != NULL) {
        MPID_Request_release(req);
    }
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
示例7: MPIC_Sendrecv
int MPIC_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                  int dest, int sendtag, void *recvbuf, int recvcount,
                  MPI_Datatype recvtype, int source, int recvtag,
                  MPI_Comm comm, MPI_Status *status)
{
    /* Collective-internal blocking sendrecv: posts the receive first,
     * starts the send, then waits for both to complete on the collective
     * context of 'comm'.  The receive's status / error is what the caller
     * sees.
     *
     * BUGFIX: the original jumped to fn_fail on error without releasing
     * the already-created send/receive requests, leaking the request
     * objects; the error path now releases whichever requests exist. */
    MPID_Request *recv_req_ptr = NULL, *send_req_ptr = NULL;
    int mpi_errno = MPI_SUCCESS;
    int context_id;
    MPID_Comm *comm_ptr = NULL;
    MPIDI_STATE_DECL(MPID_STATE_MPIC_SENDRECV);

    MPIDI_PT2PT_FUNC_ENTER_BOTH(MPID_STATE_MPIC_SENDRECV);

    MPID_Comm_get_ptr( comm, comm_ptr );
    context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ?
        MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL;

    /* Post the receive before starting the send so a matching incoming
     * message is never missed. */
    mpi_errno = MPID_Irecv(recvbuf, recvcount, recvtype, source, recvtag,
                           comm_ptr, context_id, &recv_req_ptr);
    if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
    mpi_errno = MPID_Isend(sendbuf, sendcount, sendtype, dest, sendtag,
                           comm_ptr, context_id, &send_req_ptr);
    if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }

    mpi_errno = MPIC_Wait(send_req_ptr);
    if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
    mpi_errno = MPIC_Wait(recv_req_ptr);
    if (mpi_errno) { MPIU_ERR_POPFATAL(mpi_errno); }

    /* Report the receive's status and any per-message error. */
    if (status != MPI_STATUS_IGNORE)
        *status = recv_req_ptr->status;
    mpi_errno = recv_req_ptr->status.MPI_ERROR;

    MPID_Request_release(send_req_ptr);
    MPID_Request_release(recv_req_ptr);

 fn_exit:
    MPIDI_PT2PT_FUNC_EXIT_BOTH(MPID_STATE_MPIC_SENDRECV);
    return mpi_errno;
 fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    /* Release whichever requests were created before the failure. */
    if (send_req_ptr)
        MPID_Request_release(send_req_ptr);
    if (recv_req_ptr)
        MPID_Request_release(recv_req_ptr);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
示例8: MPIDI_Comm_create_from_pami_geom
pami_result_t MPIDI_Comm_create_from_pami_geom(pami_geometry_range_t *task_slices,
                                               size_t slice_count,
                                               pami_geometry_t *geometry,
                                               void **cookie)
{
  /* Build an MPI communicator whose members are the world ranks covered
   * by 'task_slices', then hand back the PAMI geometry of that
   * communicator in *geometry and the communicator pointer in *cookie.
   * Returns PAMI_SUCCESS, or PAMI_ERROR on any internal MPI failure.
   *
   * BUGFIX: the original leaked the 'ranks' array on both error returns
   * that follow its allocation; it is now freed on every exit path.
   * The slice loop index is also size_t now, matching slice_count. */
  int mpi_errno = MPI_SUCCESS;
  int num_tasks = 0;
  int *ranks = NULL;
  MPID_Comm *comm_ptr = NULL, *new_comm_ptr = NULL;
  MPID_Group *group_ptr = NULL, *new_group_ptr = NULL;
  size_t i;        /* slice index; same type as slice_count */
  int j = 0;       /* next free slot in 'ranks' */

  /* Get comm_ptr for MPI_COMM_WORLD and get the group_ptr for it */
  MPID_Comm_get_ptr( MPI_COMM_WORLD, comm_ptr );
  mpi_errno = MPIR_Comm_group_impl(comm_ptr, &group_ptr);
  if (mpi_errno)
  {
    TRACE_ERR("Error while creating group_ptr from MPI_COMM_WORLD in MPIDI_Comm_create_from_pami_geom\n");
    return PAMI_ERROR;
  }

  /* Create the ranks list from the pami_geometry_range_t array */
  for(i = 0; i < slice_count; i++)
  {
    num_tasks += (task_slices[i].hi - task_slices[i].lo) + 1;
  }
  ranks = MPIU_Calloc0(num_tasks, int);
  for(i = 0; i < slice_count; i++)
  {
    int slice_sz = (task_slices[i].hi - task_slices[i].lo) + 1;
    int k;
    for(k = 0; k < slice_sz; k++)
    {
      ranks[j] = task_slices[i].lo + k;
      j++;
    }
  }

  /* Now we have all we need to create the new group. Create it */
  mpi_errno = MPIR_Group_incl_impl(group_ptr, num_tasks, ranks, &new_group_ptr);
  if (mpi_errno)
  {
    TRACE_ERR("Error while creating new_group_ptr from group_ptr in MPIDI_Comm_create_from_pami_geom\n");
    MPIU_TestFree(&ranks);  /* BUGFIX: don't leak the ranks array */
    return PAMI_ERROR;
  }

  /* Now create the communicator using the new_group_ptr */
  mpi_errno = MPIR_Comm_create_intra(comm_ptr, new_group_ptr, &new_comm_ptr);
  if (mpi_errno)
  {
    TRACE_ERR("Error while creating new_comm_ptr from group_ptr in MPIDI_Comm_create_from_pami_geom\n");
    MPIU_TestFree(&ranks);  /* BUGFIX: don't leak the ranks array */
    return PAMI_ERROR;
  }

  if(new_comm_ptr)
  {
    /* Get the geometry from the communicator and set the out parameters */
    *geometry = new_comm_ptr->mpid.geometry;
    *cookie = new_comm_ptr;
  }
  else
  {
    *geometry = PAMI_GEOMETRY_NULL;
    *cookie = NULL;
  }

  /* Cleanup */
  MPIU_TestFree(&ranks);
  return PAMI_SUCCESS;
}
示例9: communicator
/*@
MPI_Iprobe - Nonblocking test for a message
Input Parameters:
+ source - source rank, or 'MPI_ANY_SOURCE' (integer)
. tag - tag value or 'MPI_ANY_TAG' (integer)
- comm - communicator (handle)
Output Parameters:
+ flag - True if a message with the specified source, tag, and communicator
is available (logical)
- status - status object (Status)
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Iprobe(int source, int tag, MPI_Comm comm, int *flag,
MPI_Status *status)
{
static const char FCNAME[] = "MPI_Iprobe";
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_IPROBE);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_PT2PT_FUNC_ENTER(MPID_STATE_MPI_IPROBE);
/* Validate handle parameters needing to be converted */
/* First error-check pass: validate only the raw handle, which must be
 * done before it is converted to an object pointer. */
#   ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
#   endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate parameters if error checking is enabled */
/* Second error-check pass: validate the converted pointer and the
 * remaining arguments (flag non-NULL, receive tag/rank in range). */
#   ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
/* Validate communicator */
MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
if (mpi_errno) goto fn_fail;
MPIR_ERRTEST_ARGNULL( flag, "flag", mpi_errno );
MPIR_ERRTEST_RECV_TAG(tag,mpi_errno);
if (comm_ptr) {
MPIR_ERRTEST_RECV_RANK(comm_ptr, source, mpi_errno);
}
}
MPID_END_ERROR_CHECKS;
}
#   endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
/* FIXME: Is this correct for intercomms? */
/* Probe on the point-to-point context of the communicator. */
mpi_errno = MPID_Iprobe(source, tag, comm_ptr, MPID_CONTEXT_INTRA_PT2PT,
flag, status);
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
/* ... end of body of routine ... */
fn_exit:
MPID_MPI_PT2PT_FUNC_EXIT(MPID_STATE_MPI_IPROBE);
MPIU_THREAD_CS_EXIT(ALLFUNC,);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
/* Wrap the error with call-site information, then report it through
 * the communicator's error handler. */
#   ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_iprobe",
"**mpi_iprobe %i %t %C %p %p", source, tag, comm, flag, status);
}
#   endif
mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例10: buffer
/*@
MPI_Igatherv - XXX description here
Input Parameters:
+ sendbuf - starting address of the send buffer (choice)
. sendcount - number of elements in send buffer (non-negative integer)
. sendtype - data type of send buffer elements (handle)
. recvcounts - non-negative integer array (of length group size) containing the number of elements that are received from each process (significant only at root)
. displs - integer array (of length group size). Entry i specifies the displacement relative to recvbuf at which to place the incoming data from process i (significant only at root)
. recvtype - data type of receive buffer elements (significant only at root) (handle)
. root - rank of receiving process (integer)
- comm - communicator (handle)
Output Parameters:
+ recvbuf - starting address of the receive buffer (significant only at root) (choice)
- request - communication request (handle)
.N ThreadSafe
.N Fortran
.N Errors
@*/
int MPI_Igatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
const int recvcounts[], const int displs[], MPI_Datatype recvtype, int root,
MPI_Comm comm, MPI_Request *request)
{
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_IGATHERV);
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_IGATHERV);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
/* TODO more checks may be appropriate */
}
MPID_END_ERROR_CHECKS
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr(comm, comm_ptr);
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS
{
MPID_Datatype *sendtype_ptr=NULL, *recvtype_ptr=NULL;
int i, rank, comm_size;
MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
if (comm_ptr->comm_kind == MPID_INTRACOMM) {
MPIR_ERRTEST_INTRA_ROOT(comm_ptr, root, mpi_errno);
if (sendbuf != MPI_IN_PLACE) {
MPIR_ERRTEST_COUNT(sendcount, mpi_errno);
MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
if (HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
MPID_Datatype_committed_ptr( sendtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
MPIR_ERRTEST_USERBUFFER(sendbuf,sendcount,sendtype,mpi_errno);
}
rank = comm_ptr->rank;
if (rank == root) {
comm_size = comm_ptr->local_size;
for (i=0; i<comm_size; i++) {
MPIR_ERRTEST_COUNT(recvcounts[i], mpi_errno);
MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
}
if (HANDLE_GET_KIND(recvtype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(recvtype, recvtype_ptr);
MPID_Datatype_valid_ptr( recvtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
MPID_Datatype_committed_ptr( recvtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
for (i=0; i<comm_size; i++) {
if (recvcounts[i] > 0) {
MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, recvcounts[i], mpi_errno);
MPIR_ERRTEST_USERBUFFER(recvbuf,recvcounts[i],recvtype,mpi_errno);
break;
}
}
}
//.........这里部分代码省略.........
示例11: communicator
/*@
MPI_Comm_idup - nonblocking communicator duplication
Input Parameters:
. comm - communicator (handle)
Output Parameters:
+ newcomm - copy of comm (handle)
- request - communication request (handle)
.N ThreadSafe
.N Fortran
.N Errors
@*/
int MPI_Comm_idup(MPI_Comm comm, MPI_Comm *newcomm, MPI_Request *request)
{
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
MPID_Comm *newcomm_ptr = NULL;
/* dreq tracks the nonblocking duplication; its handle is returned to
 * the caller in *request. */
MPID_Request *dreq = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_COMM_IDUP);
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_COMM_IDUP);
/* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
/* TODO more checks may be appropriate */
}
MPID_END_ERROR_CHECKS
}
#   endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr(comm, comm_ptr);
/* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS
{
MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
MPIR_ERRTEST_ARGNULL(request, "request", mpi_errno);
/* TODO more checks may be appropriate (counts, in_place, buffer aliasing, etc) */
}
MPID_END_ERROR_CHECKS
}
#   endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
/* Initialize outputs so the caller sees defined values on failure. */
*request = MPI_REQUEST_NULL;
*newcomm = MPI_COMM_NULL;
mpi_errno = MPIR_Comm_idup_impl(comm_ptr, &newcomm_ptr, &dreq);
if (mpi_errno) MPIR_ERR_POP(mpi_errno);
/* NOTE: this is a publication for most of the comm, but the context ID
 * won't be valid yet, so we must "republish" relative to the request
 * handle at request completion time. */
MPIR_OBJ_PUBLISH_HANDLE(*newcomm, newcomm_ptr->handle);
*request = dreq->handle;
/* ... end of body of routine ... */
fn_exit:
MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_COMM_IDUP);
MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
return mpi_errno;
fn_fail:
/* --BEGIN ERROR HANDLING-- */
/* Wrap the error with call-site information, then report it. */
#   ifdef HAVE_ERROR_CHECKING
{
mpi_errno = MPIR_Err_create_code(
mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
"**mpi_comm_idup", "**mpi_comm_idup %C %p %p", comm, newcomm, request);
}
#   endif
mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
goto fn_exit;
/* --END ERROR HANDLING-- */
}
示例12: buffer
/*@
MPI_Alltoallv - Sends data from all to all processes; each process may
send a different amount of data and provide displacements for the input
and output data.
Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcounts - integer array equal to the group size
specifying the number of elements to send to each processor
. sdispls - integer array (of length group size). Entry
'j' specifies the displacement (relative to sendbuf from
which to take the outgoing data destined for process 'j'
. sendtype - data type of send buffer elements (handle)
. recvcounts - integer array equal to the group size
specifying the maximum number of elements that can be received from
each processor
. rdispls - integer array (of length group size). Entry
'i' specifies the displacement (relative to recvbuf at
which to place the incoming data from process 'i'
. recvtype - data type of receive buffer elements (handle)
- comm - communicator (handle)
Output Parameters:
. recvbuf - address of receive buffer (choice)
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Alltoallv(const void *sendbuf, const int *sendcounts,
const int *sdispls, MPI_Datatype sendtype, void *recvbuf,
const int *recvcounts, const int *rdispls, MPI_Datatype recvtype,
MPI_Comm comm)
{
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
MPIR_Errflag_t errflag = MPIR_ERR_NONE;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_ALLTOALLV);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_ALLTOALLV);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPID_Datatype *sendtype_ptr=NULL, *recvtype_ptr=NULL;
int i, comm_size;
int check_send = (comm_ptr->comm_kind == MPID_INTRACOMM && sendbuf != MPI_IN_PLACE);
MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
if (comm_ptr->comm_kind == MPID_INTRACOMM) {
comm_size = comm_ptr->local_size;
if (sendbuf != MPI_IN_PLACE && sendtype == recvtype && sendcounts == recvcounts)
MPIR_ERRTEST_ALIAS_COLL(sendbuf, recvbuf, mpi_errno);
} else
comm_size = comm_ptr->remote_size;
if (comm_ptr->comm_kind == MPID_INTERCOMM && sendbuf == MPI_IN_PLACE) {
MPIR_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**sendbuf_inplace");
}
for (i=0; i<comm_size; i++) {
if (check_send) {
MPIR_ERRTEST_COUNT(sendcounts[i], mpi_errno);
MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
}
MPIR_ERRTEST_COUNT(recvcounts[i], mpi_errno);
MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
}
if (check_send && HANDLE_GET_KIND(sendtype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(sendtype, sendtype_ptr);
MPID_Datatype_valid_ptr( sendtype_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
//.........这里部分代码省略.........
示例13: buffer
/*@
MPI_Allreduce - Combines values from all processes and distributes the result
back to all processes
Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - data type of elements of send buffer (handle)
. op - operation (handle)
- comm - communicator (handle)
Output Parameter:
. recvbuf - starting address of receive buffer (choice)
.N ThreadSafe
.N Fortran
.N collops
.N Errors
.N MPI_ERR_BUFFER
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_OP
.N MPI_ERR_COMM
@*/
int MPI_Allreduce ( void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm )
{
static const char FCNAME[] = "MPI_Allreduce";
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
int errflag = FALSE;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_ALLREDUCE);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_COLL_FUNC_ENTER(MPID_STATE_MPI_ALLREDUCE);
/* Validate parameters, especially handles needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate parameters and objects (post conversion) */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPID_Datatype *datatype_ptr = NULL;
MPID_Op *op_ptr = NULL;
MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
MPIR_ERRTEST_COUNT(count, mpi_errno);
MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
MPIR_ERRTEST_OP(op, mpi_errno);
if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
MPID_Datatype_get_ptr(datatype, datatype_ptr);
MPID_Datatype_valid_ptr( datatype_ptr, mpi_errno );
MPID_Datatype_committed_ptr( datatype_ptr, mpi_errno );
}
if (comm_ptr->comm_kind == MPID_INTERCOMM)
MPIR_ERRTEST_SENDBUF_INPLACE(sendbuf, count, mpi_errno);
if (sendbuf != MPI_IN_PLACE)
MPIR_ERRTEST_USERBUFFER(sendbuf,count,datatype,mpi_errno);
MPIR_ERRTEST_RECVBUF_INPLACE(recvbuf, count, mpi_errno);
MPIR_ERRTEST_USERBUFFER(recvbuf,count,datatype,mpi_errno);
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
if (HANDLE_GET_KIND(op) != HANDLE_KIND_BUILTIN) {
MPID_Op_get_ptr(op, op_ptr);
MPID_Op_valid_ptr( op_ptr, mpi_errno );
}
if (HANDLE_GET_KIND(op) == HANDLE_KIND_BUILTIN) {
mpi_errno =
( * MPIR_Op_check_dtype_table[op%16 - 1] )(datatype);
}
if (count != 0) {
MPIR_ERRTEST_ALIAS_COLL(sendbuf, recvbuf, mpi_errno);
}
if (mpi_errno != MPI_SUCCESS) goto fn_fail;
}
//.........这里部分代码省略.........
示例14: intercommunicator
/*@
MPI_Comm_join - Create a communicator by joining two processes connected by
a socket.
Input Parameter:
. fd - socket file descriptor
Output Parameter:
. intercomm - new intercommunicator (handle)
Notes:
The socket must be quiescent before 'MPI_COMM_JOIN' is called and after
'MPI_COMM_JOIN' returns. More specifically, on entry to 'MPI_COMM_JOIN', a
read on the socket will not read any data that was written to the socket
before the remote process called 'MPI_COMM_JOIN'.
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
@*/
int MPI_Comm_join(int fd, MPI_Comm *intercomm)
{
    static const char FCNAME[] = "MPI_Comm_join";
    int mpi_errno = MPI_SUCCESS, err;
    MPID_Comm *intercomm_ptr;
    MPID_Comm *comm_self_ptr;
    char *local_port, *remote_port;
    MPIU_CHKLMEM_DECL(2);
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_COMM_JOIN);

    MPIR_ERRTEST_INITIALIZED_ORDIE();
    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_COMM_JOIN);

    /* ... body of routine ... */

    /* Scratch buffers for the two port names exchanged over the socket. */
    MPIU_CHKLMEM_MALLOC(local_port, char *, MPI_MAX_PORT_NAME, mpi_errno, "local port name");
    MPIU_CHKLMEM_MALLOC(remote_port, char *, MPI_MAX_PORT_NAME, mpi_errno, "remote port name");

    mpi_errno = MPIR_Open_port_impl(NULL, local_port);
    MPIU_ERR_CHKANDJUMP((mpi_errno != MPI_SUCCESS), mpi_errno, MPI_ERR_OTHER, "**openportfailed");

    /* Swap port names with the peer over the caller-provided socket;
     * identical names indicate the two sides are not distinct. */
    err = MPIR_fd_send(fd, local_port, MPI_MAX_PORT_NAME);
    MPIU_ERR_CHKANDJUMP1((err != 0), mpi_errno, MPI_ERR_INTERN, "**join_send", "**join_send %d", err);
    err = MPIR_fd_recv(fd, remote_port, MPI_MAX_PORT_NAME);
    MPIU_ERR_CHKANDJUMP1((err != 0), mpi_errno, MPI_ERR_INTERN, "**join_recv", "**join_recv %d", err);
    MPIU_ERR_CHKANDJUMP2((strcmp(local_port, remote_port) == 0), mpi_errno, MPI_ERR_INTERN, "**join_portname",
                         "**join_portname %s %s", local_port, remote_port);

    /* The side whose port name compares lower accepts; the other side
     * connects.  Both use MPI_COMM_SELF as the local communicator. */
    MPID_Comm_get_ptr( MPI_COMM_SELF, comm_self_ptr );
    if (strcmp(local_port, remote_port) < 0)
        mpi_errno = MPIR_Comm_accept_impl(local_port, NULL, 0, comm_self_ptr, &intercomm_ptr);
    else
        mpi_errno = MPIR_Comm_connect_impl(remote_port, NULL, 0, comm_self_ptr, &intercomm_ptr);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    mpi_errno = MPIR_Close_port_impl(local_port);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    MPIU_OBJ_PUBLISH_HANDLE(*intercomm, intercomm_ptr->handle);
    /* ... end of body of routine ... */

fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_COMM_JOIN);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;
fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_comm_join",
            "**mpi_comm_join %d %p", fd, intercomm);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
示例15: buffer
/*@
MPI_Bsend - Basic send with user-provided buffering
Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (nonnegative integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)
Notes:
This send is provided as a convenience function; it allows the user to
send messages without worring about where they are buffered (because the
user `must` have provided buffer space with 'MPI_Buffer_attach').
In deciding how much buffer space to allocate, remember that the buffer space
is not available for reuse by subsequent 'MPI_Bsend's unless you are certain
that the message
has been received (not just that it should have been received). For example,
this code does not allocate enough buffer space
.vb
MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD );
for (i=0; i<m; i++) {
MPI_Bsend( buf, n, MPI_DOUBLE, ... );
}
.ve
because only enough buffer space is provided for a single send, and the
loop may start a second 'MPI_Bsend' before the first is done making use of the
buffer.
In C, you can
force the messages to be delivered by
.vb
MPI_Buffer_detach( &b, &n );
MPI_Buffer_attach( b, n );
.ve
(The 'MPI_Buffer_detach' will not complete until all buffered messages are
delivered.)
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG
.seealso: MPI_Buffer_attach, MPI_Ibsend, MPI_Bsend_init
@*/
int MPI_Bsend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
MPI_Comm comm)
{
static const char FCNAME[] = "MPI_Bsend";
int mpi_errno = MPI_SUCCESS;
MPID_Comm *comm_ptr = NULL;
MPID_Request *request_ptr = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_BSEND);
MPIR_ERRTEST_INITIALIZED_ORDIE();
MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
MPID_MPI_PT2PT_FUNC_ENTER_FRONT(MPID_STATE_MPI_BSEND);
/* Validate handle parameters needing to be converted */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COMM(comm, mpi_errno);
}
MPID_END_ERROR_CHECKS;
}
# endif /* HAVE_ERROR_CHECKING */
/* Convert MPI object handles to object pointers */
MPID_Comm_get_ptr( comm, comm_ptr );
/* Validate object pointers if error checking is enabled */
# ifdef HAVE_ERROR_CHECKING
{
MPID_BEGIN_ERROR_CHECKS;
{
MPIR_ERRTEST_COUNT(count,mpi_errno);
/* Validate comm_ptr */
MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
if (mpi_errno) goto fn_fail;
/* If comm_ptr is not valid, it will be reset to null */
if (comm_ptr) {
MPIR_ERRTEST_SEND_TAG(tag,mpi_errno);
MPIR_ERRTEST_SEND_RANK(comm_ptr,dest,mpi_errno)
}
/* Validate datatype handle */
MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);
/* Validate datatype object */
//.........这里部分代码省略.........