This article collects typical usage examples of the C++ function MPIU_Malloc. If you have been wondering what exactly MPIU_Malloc does, how to call it, or where to find working examples of it, the hand-picked code samples below may help.
The 15 code examples of MPIU_Malloc shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
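All of the examples share the same basic discipline: MPIU_Malloc is MPICH's internal wrapper around malloc, its return value must be checked for NULL, and memory obtained from it is later released with MPIU_Free (both appear throughout the examples below). The following minimal sketch illustrates that allocate/check/use/free cycle; the helper name alloc_zeroed_ints and the mpiimpl.h include are illustrative assumptions, not code taken from MPICH.

#include "mpiimpl.h"   /* assumed MPICH-internal header declaring MPIU_Malloc/MPIU_Free */

/* Illustrative helper (not from the examples below): allocate and zero an
   int array with MPIU_Malloc, returning NULL on failure.  The caller is
   responsible for releasing the array with MPIU_Free(). */
static int *alloc_zeroed_ints(int count)
{
    int i;
    int *buf = (int *)MPIU_Malloc(count * sizeof(int));
    if (!buf) {
        return NULL;               /* always check the result of MPIU_Malloc */
    }
    for (i = 0; i < count; i++) {
        buf[i] = 0;
    }
    return buf;                    /* release with MPIU_Free(buf) when done */
}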
Example 1: MPID_Dataloop_init_indexed
/*
 * Indexed
 */
MPID_Dataloop *MPID_Dataloop_init_indexed( int count, int *blocksize,
                                           MPI_Aint *offset )
{
    MPID_Dataloop *it;
    MPI_Aint      extent = 0;
    int           i;

    it = (MPID_Dataloop *)MPIU_Malloc( sizeof(MPID_Dataloop) );
    it->kind                      = MPID_DTYPE_INDEXED | DATALOOP_FINAL_MASK;
    it->loop_params.i_t.count     = count;
    it->loop_params.i_t.blocksize = (int *)MPIU_Malloc( sizeof(int) * count );
    it->loop_params.i_t.offset    =
        (MPI_Aint *)MPIU_Malloc( sizeof(MPI_Aint) * count );
    for (i=0; i<count; i++) {
        it->loop_params.i_t.offset[i]    = offset[i];
        it->loop_params.i_t.blocksize[i] = blocksize[i];
        if (offset[i] + blocksize[i] > extent)
            extent = offset[i] + blocksize[i];
    }
    it->loop_params.i_t.dataloop = 0;
    it->extent                   = extent;
    it->handle                   = 0;
    return it;
}
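Example 1 performs three separate allocations (the dataloop itself plus its blocksize and offset arrays) and never checks any of them for failure. A matching cleanup routine is not shown in the excerpt; a minimal sketch of what one could look like, using only the fields that appear above, is:

/* Hypothetical cleanup counterpart for Example 1 (not part of the original
   source): release the two parameter arrays first, then the dataloop itself. */
static void MPID_Dataloop_free_indexed( MPID_Dataloop *it )
{
    if (!it) return;
    MPIU_Free( it->loop_params.i_t.blocksize );
    MPIU_Free( it->loop_params.i_t.offset );
    MPIU_Free( it );
}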
Example 2: MPIE_Putenv
/*
 * Add an environment variable to the global list of variables
 */
int MPIE_Putenv( ProcessWorld *pWorld, const char *env_string )
{
    EnvInfo *genv;
    EnvData *p;

    /* FIXME: This should be getGenv (so allocation/init in one place) */
    if (!pWorld->genv) {
        genv             = (EnvInfo *)MPIU_Malloc( sizeof(EnvInfo) );
        genv->includeAll = 1;
        genv->envPairs   = 0;
        genv->envNames   = 0;
        pWorld->genv     = genv;
    }
    genv = pWorld->genv;

    p = (EnvData *)MPIU_Malloc( sizeof(EnvData) );
    if (!p) return 1;
    p->name     = 0;
    p->value    = 0;
    p->envvalue = (const char *)MPIU_Strdup( env_string );
    if (!p->envvalue) return 1;
    p->nextData    = genv->envPairs;
    genv->envPairs = p;
    return 0;
}
Example 3: MPID_nem_ib_allocate_memory
int
MPID_nem_ib_allocate_memory(int pg_rank, int pg_size)
{
    int mpi_errno = MPI_SUCCESS;

    process_info.polling_group_size = 0;

    if (rdma_polling_set_limit > 0)
    {
        process_info.polling_set =
            (MPIDI_VC_t**) MPIU_Malloc(rdma_polling_set_limit * sizeof(MPIDI_VC_t*));
    }
    else
    {
        process_info.polling_set =
            (MPIDI_VC_t**) MPIU_Malloc(pg_size * sizeof(MPIDI_VC_t*));
    }

    if (!process_info.polling_set)
    {
        fprintf(
            stderr,
            "[%s:%d]: %s\n",
            __FILE__,
            __LINE__,
            "unable to allocate space for polling set\n");

        return 0;
    }

    /* We need to allocate vbufs for send/recv path */
    if ((mpi_errno = allocate_vbufs(rdma_vbuf_pool_size)))
    {
        return mpi_errno;
    }

    return mpi_errno;
}
Example 4: MPID_NS_Create
int MPID_NS_Create( const MPID_Info *info_ptr, MPID_NS_Handle *handle_ptr )
{
    static const char FCNAME[] = "MPID_NS_Create";
    int err;
    int length;
    char *pmi_namepub_kvs;

    *handle_ptr = (MPID_NS_Handle)MPIU_Malloc( sizeof(struct MPID_NS_Handle) );
    /* --BEGIN ERROR HANDLING-- */
    if (!*handle_ptr)
    {
        err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0);
        return err;
    }
    /* --END ERROR HANDLING-- */

    err = PMI_KVS_Get_name_length_max(&length);
    /* --BEGIN ERROR HANDLING-- */
    if (err != PMI_SUCCESS)
    {
        err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
    }
    /* --END ERROR HANDLING-- */

    (*handle_ptr)->kvsname = (char*)MPIU_Malloc(length);
    /* --BEGIN ERROR HANDLING-- */
    if (!(*handle_ptr)->kvsname)
    {
        err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**nomem", 0);
        return err;
    }
    /* --END ERROR HANDLING-- */

    pmi_namepub_kvs = getenv("PMI_NAMEPUB_KVS");
    if (pmi_namepub_kvs)
    {
        MPIU_Strncpy((*handle_ptr)->kvsname, pmi_namepub_kvs, length);
    }
    else
    {
        err = PMI_KVS_Get_my_name((*handle_ptr)->kvsname, length);
        /* --BEGIN ERROR HANDLING-- */
        if (err != PMI_SUCCESS)
        {
            err = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0);
        }
        /* --END ERROR HANDLING-- */
    }

    /*printf("namepub kvs: <%s>\n", (*handle_ptr)->kvsname);fflush(stdout);*/
    return 0;
}
Example 5: reinit_pmi
static int reinit_pmi(void)
{
    int ret;
    int has_parent = 0;
    int pg_rank, pg_size;
    int kvs_name_sz, pg_id_sz;

    MPIDI_STATE_DECL(MPID_STATE_REINIT_PMI);
    MPIDI_FUNC_ENTER(MPID_STATE_REINIT_PMI);

    /* Init pmi and do some sanity checks */
    ret = PMI_Init(&has_parent);
    CHECK_ERR(ret, "pmi_init");
    ret = PMI_Get_rank(&pg_rank);
    CHECK_ERR(ret, "pmi_get_rank");
    ret = PMI_Get_size(&pg_size);
    CHECK_ERR(ret, "pmi_get_size");
    CHECK_ERR(pg_size != MPIDI_Process.my_pg->size, "pg size differs after restart");
    CHECK_ERR(pg_rank != MPIDI_Process.my_pg_rank, "pg rank differs after restart");

    /* get new pg_id */
    ret = PMI_KVS_Get_name_length_max(&pg_id_sz);
    CHECK_ERR(ret, "pmi_get_id_length_max");
    MPIU_Free(MPIDI_Process.my_pg->id);
    MPIDI_Process.my_pg->id = MPIU_Malloc(pg_id_sz + 1);
    CHECK_ERR(MPIDI_Process.my_pg->id == NULL, "malloc failed");
    ret = PMI_KVS_Get_my_name(MPIDI_Process.my_pg->id, pg_id_sz);
    CHECK_ERR(ret, "pmi_kvs_get_my_name");

    /* get new kvsname */
    ret = PMI_KVS_Get_name_length_max(&kvs_name_sz);
    CHECK_ERR(ret, "PMI_KVS_Get_name_length_max");
    MPIU_Free(MPIDI_Process.my_pg->connData);
    MPIDI_Process.my_pg->connData = MPIU_Malloc(kvs_name_sz + 1);
    CHECK_ERR(MPIDI_Process.my_pg->connData == NULL, "malloc failed");
    ret = PMI_KVS_Get_my_name(MPIDI_Process.my_pg->connData, kvs_name_sz);
    CHECK_ERR(ret, "PMI_Get_my_name");

    MPIDI_FUNC_EXIT(MPID_STATE_REINIT_PMI);
    return 0;
}
Example 6: MPIDI_CH3U_Post_data_receive_unexpected
int MPIDI_CH3U_Post_data_receive_unexpected(MPID_Request * rreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    /* FIXME: to improve performance, allocate temporary buffer from a
       specialized buffer pool. */
    /* FIXME: to avoid memory exhaustion, integrate buffer pool management
       with flow control */
    MPIU_DBG_MSG(CH3_OTHER,VERBOSE,"unexpected request allocated");

    rreq->dev.tmpbuf = MPIU_Malloc(rreq->dev.recv_data_sz);
    if (!rreq->dev.tmpbuf) {
        MPIU_ERR_SETANDJUMP1(mpi_errno,MPI_ERR_OTHER,"**nomem","**nomem %d",
                             rreq->dev.recv_data_sz);
    }
    rreq->dev.tmpbuf_sz = rreq->dev.recv_data_sz;

    rreq->dev.iov[0].MPID_IOV_BUF = (MPID_IOV_BUF_CAST)rreq->dev.tmpbuf;
    rreq->dev.iov[0].MPID_IOV_LEN = rreq->dev.recv_data_sz;
    rreq->dev.iov_count = 1;
    rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_UnpackUEBufComplete;
    rreq->dev.recv_pending_count = 2;

 fn_fail:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);
    return mpi_errno;
}
Example 7: context_id_init
static void context_id_init(void)
{
    int i;

#if defined(FINEGRAIN_MPI)
    MPIU_Assert (NULL == context_mask);
    context_mask = (uint32_t *)MPIU_Malloc(MPIR_MAX_CONTEXT_MASK * sizeof(uint32_t));
    MPIU_Assert (NULL != context_mask);
#endif

    for (i = 1; i < MPIR_MAX_CONTEXT_MASK; i++) {
        context_mask[i] = 0xFFFFFFFF;
    }
    /* The first two values are already used (comm_world, comm_self).
     * The third value is also used for the internal-only copy of
     * comm_world, if needed by mpid. */
#ifdef MPID_NEEDS_ICOMM_WORLD
    context_mask[0] = 0xFFFFFFF8;
#else
    context_mask[0] = 0xFFFFFFFC;
#endif
    initialize_context_mask = 0;

#ifdef MPICH_DEBUG_HANDLEALLOC
    /* check for context ID leaks in MPI_Finalize. Use (_PRIO-1) to make sure
     * that we run after MPID_Finalize. */
    MPIR_Add_finalize(check_context_ids_on_finalize, context_mask,
                      MPIR_FINALIZE_CALLBACK_PRIO - 1); /* FG: TODO IMPORTANT */
#endif
}
Example 8: MPIU_Thread_create
/*
 * MPIU_Thread_create()
 */
void MPIU_Thread_create(MPIU_Thread_func_t func, void * data, MPIU_Thread_id_t * idp, int * errp)
{
    struct MPEI_Thread_info * thread_info;
    int err = MPIU_THREAD_SUCCESS;

    /* FIXME: faster allocation, or avoid it all together? */
    thread_info = (struct MPEI_Thread_info *) MPIU_Malloc(sizeof(struct MPEI_Thread_info));
    if (thread_info != NULL)
    {
        pthread_attr_t attr;

        thread_info->func = func;
        thread_info->data = data;

        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        err = pthread_create(idp, &attr, MPEI_Thread_start, thread_info);
        /* FIXME: convert error to an MPIU_THREAD_ERR value */
        pthread_attr_destroy(&attr);
    }
    else
    {
        err = 1000000000;
    }

    if (errp != NULL)
    {
        *errp = err;
    }
}
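Example 8 heap-allocates a small MPEI_Thread_info block so that func and data remain valid until the new thread actually starts; the start routine MPEI_Thread_start passed to pthread_create is not shown in this excerpt. The sketch below is only a guess at what such a trampoline typically looks like (unpack the argument block, free it, then call the user function); the real MPICH routine may differ.

/* Hypothetical sketch of the MPEI_Thread_start trampoline assumed by
   Example 8 (the real implementation is not shown in this excerpt). */
static void *MPEI_Thread_start(void *arg)
{
    struct MPEI_Thread_info *thread_info = (struct MPEI_Thread_info *) arg;
    MPIU_Thread_func_t func = thread_info->func;
    void *data = thread_info->data;

    MPIU_Free(thread_info);   /* block was MPIU_Malloc'ed in MPIU_Thread_create */
    func(data);
    return NULL;
}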
Example 9: MPID_nem_ib_init_hash_table
int MPID_nem_ib_init_hash_table(
        MPID_nem_ib_hash_table_ptr_t table,
        uint32_t nentries)
{
    int mpi_errno = MPI_SUCCESS;

    table->entries = MPIU_Malloc(
            sizeof(MPID_nem_ib_hash_elem_t) * nentries);
    table->num_entries = nentries;

    if(NULL == table->entries) {
        MPIU_CHKMEM_SETERR(mpi_errno,
                sizeof(MPID_nem_ib_hash_elem_t) * nentries,
                "IB Module Hash Table");
    }

    memset(table->entries, 0,
            sizeof(MPID_nem_ib_hash_elem_t) * nentries);

    pthread_mutex_init(&table->hash_table_lock, NULL);

/* fn_exit: */
    return mpi_errno;
/* fn_fail:
    goto fn_exit;
*/
}
Example 10: create_r_cookie
static int create_r_cookie (char *hostname, int port, int data_sz, char **cookie, int *len)
{
    int mpi_errno = MPI_SUCCESS;
    int hostname_len;
    int cookie_len;
    r_cookie_t *c;

    hostname_len = strnlen (hostname, MAX_HOSTNAME_LEN) + 1;
    cookie_len = sizeof (r_cookie_t) - 1 + hostname_len;

    c = MPIU_Malloc (cookie_len);
    MPIU_ERR_CHKANDJUMP (c == NULL, mpi_errno, MPI_ERR_OTHER, "**nomem");

    c->port = port;
    c->data_sz = data_sz;
    MPIU_Strncpy (c->hostname, hostname, hostname_len);

    *cookie = (char *)c;
    *len = sizeof (r_cookie_t) - 1 + hostname_len;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
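The size arithmetic in Example 10, sizeof (r_cookie_t) - 1 + hostname_len, is the classic one-element-array ("struct hack") idiom: the cookie is over-allocated so that a trailing char member can hold the whole hostname. The actual definition of r_cookie_t is not part of this excerpt; the layout it implies would be roughly:

/* Assumed layout of r_cookie_t, inferred from how Example 10 computes
   cookie_len (the real definition is not shown in this excerpt). */
typedef struct r_cookie {
    int  port;
    int  data_sz;
    char hostname[1];   /* over-allocated to hostname_len bytes by MPIU_Malloc */
} r_cookie_t;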
Example 11: MPIDI_Win_datatype_map
void
MPIDI_Win_datatype_map(MPIDI_Datatype * dt)
{
    if (dt->contig)
    {
        dt->num_contig = 1;
        dt->map = &dt->__map;
        dt->map[0].DLOOP_VECTOR_BUF = (void*)(size_t)dt->true_lb;
        dt->map[0].DLOOP_VECTOR_LEN = dt->size;
    }
    else
    {
        unsigned map_size = dt->pointer->max_contig_blocks*dt->count + 1;
        dt->num_contig = map_size;
        dt->map = (DLOOP_VECTOR*)MPIU_Malloc(map_size * sizeof(DLOOP_VECTOR));
        MPID_assert(dt->map != NULL);

        DLOOP_Offset last = dt->pointer->size*dt->count;
        MPID_Segment seg;
        MPID_Segment_init(NULL, dt->count, dt->type, &seg, 0);
        MPID_Segment_pack_vector(&seg, 0, &last, dt->map, &dt->num_contig);
        MPID_assert((unsigned)dt->num_contig <= map_size);

#ifdef TRACE_ON
        TRACE_ERR("dt->pointer->size=%d num_contig: orig=%u new=%d\n", dt->pointer->size, map_size, dt->num_contig);
        int i;
        for(i=0; i<dt->num_contig; ++i)
            TRACE_ERR(" %d: BUF=%zu LEN=%zu\n", i, (size_t)dt->map[i].DLOOP_VECTOR_BUF, (size_t)dt->map[i].DLOOP_VECTOR_LEN);
#endif
    }
}
Example 12: MPIR_Ibsend_impl
int MPIR_Ibsend_impl(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
                     MPID_Comm *comm_ptr, MPI_Request *request)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request *request_ptr, *new_request_ptr;
    ibsend_req_info *ibinfo=0;

    /* We don't try to complete the bsend immediately for MPI_Ibsend because
       we must create a request even if we could send the message right away */
    mpi_errno = MPIR_Bsend_isend( buf, count, datatype, dest, tag, comm_ptr,
                                  IBSEND, &request_ptr );
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* FIXME: use the memory management macros */
    ibinfo = (ibsend_req_info *)MPIU_Malloc( sizeof(ibsend_req_info) );
    ibinfo->req       = request_ptr;
    ibinfo->cancelled = 0;
    mpi_errno = MPIR_Grequest_start_impl( MPIR_Ibsend_query, MPIR_Ibsend_free,
                                          MPIR_Ibsend_cancel, ibinfo, &new_request_ptr );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    /* The request is immediately complete because the MPIR_Bsend_isend has
       already moved the data out of the user's buffer */
    MPIR_Request_add_ref( request_ptr );
    /* Request count is now 2 (set to 1 in Grequest_start) */
    MPIR_Grequest_complete_impl(new_request_ptr);
    MPIU_OBJ_PUBLISH_HANDLE(*request, new_request_ptr->handle);

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
Example 13: MPIR_Sendq_remember
/* This routine is used to establish a queue of send requests to allow the
   debugger easier access to the active requests.  Some devices may be able
   to provide this information without requiring this separate queue. */
void MPIR_Sendq_remember( MPID_Request *req,
                          int rank, int tag, int context_id )
{
    MPIR_Sendq *p;

    MPID_THREAD_CS_ENTER(POBJ, req->pobj_mutex);
    if (pool) {
        p = pool;
        pool = p->next;
    }
    else {
        p = (MPIR_Sendq *)MPIU_Malloc( sizeof(MPIR_Sendq) );
        if (!p) {
            /* Just ignore it */
            req->dbg_next = NULL;
            goto fn_exit;
        }
    }
    p->sreq       = req;
    p->tag        = tag;
    p->rank       = rank;
    p->context_id = context_id;
    p->next       = MPIR_Sendq_head;
    p->prev       = NULL;
    MPIR_Sendq_head = p;
    if (p->next) p->next->prev = p;
    req->dbg_next = p;
 fn_exit:
    MPID_THREAD_CS_EXIT(POBJ, req->pobj_mutex);
}
Example 14: MPITEST_Group_create
/* --BEGIN DEBUG-- */
void MPITEST_Group_create( int nproc, int myrank, MPI_Group *new_group )
{
    MPID_Group *new_group_ptr;
    int i;

    new_group_ptr = (MPID_Group *)MPIU_Handle_obj_alloc( &MPID_Group_mem );
    if (!new_group_ptr) {
        fprintf( stderr, "Could not create a new group\n" );
        PMPI_Abort( MPI_COMM_WORLD, 1 );
    }
    MPIU_Object_set_ref( new_group_ptr, 1 );
    new_group_ptr->lrank_to_lpid =
        (MPID_Group_pmap_t *)MPIU_Malloc( nproc * sizeof(MPID_Group_pmap_t) );
    if (!new_group_ptr->lrank_to_lpid) {
        fprintf( stderr, "Could not create lrank map for new group\n" );
        PMPI_Abort( MPI_COMM_WORLD, 1 );
    }
    new_group_ptr->rank = MPI_UNDEFINED;
    for (i=0; i<nproc; i++) {
        new_group_ptr->lrank_to_lpid[i].lrank = i;
        new_group_ptr->lrank_to_lpid[i].lpid  = i;
    }
    new_group_ptr->size              = nproc;
    new_group_ptr->rank              = myrank;
    new_group_ptr->idx_of_first_lpid = -1;

    *new_group = new_group_ptr->handle;
}
Example 15: IRLOG_CreateOutputStruct
IRLOG_IOStruct *IRLOG_CreateOutputStruct(const char *filename)
{
    IRLOG_IOStruct *pOutput = NULL;

    /* allocate a data structure */
    pOutput = (IRLOG_IOStruct*)MPIU_Malloc(sizeof(IRLOG_IOStruct));
    if (pOutput == NULL)
    {
        MPIU_Error_printf("malloc failed - %s\n", strerror(errno));
        return NULL;
    }

    /* open the output clog file */
    pOutput->f = fopen(filename, "wb");
    if (pOutput->f == NULL)
    {
        MPIU_Error_printf("Unable to open output file '%s' - %s\n", filename, strerror(errno));
        MPIU_Free(pOutput);
        return NULL;
    }

    /* set all the data fields */
    pOutput->header.type = RLOG_INVALID_TYPE;
    pOutput->pCurHeader  = pOutput->buffer;
    pOutput->pNextHeader = pOutput->buffer;
    pOutput->pEnd        = &pOutput->buffer[RLOG_BUFFSIZE];
    return pOutput;
}