本文整理汇总了C++中OPAL_THREAD_LOCK函数的典型用法代码示例。如果您正苦于以下问题:C++ OPAL_THREAD_LOCK函数的具体用法?C++ OPAL_THREAD_LOCK怎么用?C++ OPAL_THREAD_LOCK使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了OPAL_THREAD_LOCK函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mca_io_romio314_file_iwrite
/*
 * Non-blocking write at the file's current position.
 *
 * Forwards to ROMIO's MPI_File_iwrite; all ROMIO calls in this
 * component are serialized through the global mca_io_romio314_mutex.
 * Returns whatever the ROMIO call returns.
 */
int
mca_io_romio314_file_iwrite (ompi_file_t *fh,
                             const void *buf,
                             int count,
                             struct ompi_datatype_t *datatype,
                             ompi_request_t **request)
{
    mca_io_romio314_data_t *data =
        (mca_io_romio314_data_t *) fh->f_io_selected_data;
    int rc;

    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    rc = ROMIO_PREFIX(MPI_File_iwrite) (data->romio_fh, buf, count,
                                        datatype, request);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);
    return rc;
}
示例2: mca_io_romio314_file_write_all
/*
 * Collective write at the file's current position.
 *
 * Forwards to ROMIO's MPI_File_write_all; all ROMIO calls in this
 * component are serialized through the global mca_io_romio314_mutex.
 * Returns whatever the ROMIO call returns.
 */
int
mca_io_romio314_file_write_all (ompi_file_t *fh,
                                const void *buf,
                                int count,
                                struct ompi_datatype_t *datatype,
                                ompi_status_public_t * status)
{
    mca_io_romio314_data_t *data =
        (mca_io_romio314_data_t *) fh->f_io_selected_data;
    int rc;

    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    rc = ROMIO_PREFIX(MPI_File_write_all) (data->romio_fh, buf, count,
                                           datatype, status);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);
    return rc;
}
示例3: orte_waitpid
/*
 * Windows implementation of waitpid(): wait for process wpid to
 * terminate and store its exit status in *status.
 *
 * Order of operations under the global mutex:
 *   1. If the child already exited (recorded on pending_pids), consume
 *      that record and return immediately.
 *   2. Otherwise drop any callback registered for this pid — the caller
 *      is now waiting synchronously, so the callback must not fire.
 *   3. Block on the process HANDLE until it completes and collect the
 *      exit code.
 *
 * NOTE(review): the `options` argument is ignored on this path, and the
 * mutex stays held across the blocking WaitForSingleObject — both
 * preserved from the original behavior.
 *
 * Returns wpid.
 */
pid_t
orte_waitpid(pid_t wpid, int *status, int options)
{
    opal_process_handle_t* pending;

    OPAL_THREAD_LOCK(&mutex);

    /* Is the child already gone? */
    pending = find_pending_pid( wpid, false );
    if( NULL != pending ) {
        *status = pending->status;
        opal_list_remove_item( &pending_pids, (opal_list_item_t*)pending );
        OBJ_RELEASE(pending);
        OPAL_THREAD_UNLOCK(&mutex);
        return wpid;
    }

    /* Do we have any registered callback for this particular pid?
     * BUGFIX: the list argument had been mangled to "®istered_cb" (an
     * HTML "&reg" entity); restored to &registered_cb. */
    pending = find_pending_cb( wpid, false );
    if( NULL != pending ) {
        opal_list_remove_item( &registered_cb, (opal_list_item_t*)pending );
        OBJ_RELEASE( pending );
    }

    /* No luck so far. Wait until the process completes. */
    if( WAIT_OBJECT_0 == WaitForSingleObject( (HANDLE)wpid, INFINITE ) ) {
        DWORD exitCode;
        /* Process completed. Grab the exit value and return. */
        if( 0 == GetExitCodeProcess( (HANDLE)wpid, &exitCode ) ) {
            /* The original fetched GetLastError() into an unused local;
             * discard it explicitly instead.
             * NOTE(review): on this failure path exitCode is read below
             * while uninitialized — preserved from the original; consider
             * reporting an error instead. */
            (void)GetLastError();
        }
        *status = (int)exitCode;
    }

    OPAL_THREAD_UNLOCK(&mutex);
    return wpid;
}
示例4: mca_oob_tcp_peer_send
/*
 * Send (or queue) a message to a peer, initiating the appropriate
 * action based on the current state of the connection to that peer.
 *
 * @param peer  target peer; its peer_lock guards all state touched here
 * @param msg   message to deliver; ownership passes to the send machinery
 *              (queued, held as peer_send_msg, or completed)
 *
 * @return ORTE_SUCCESS normally, ORTE_ERR_UNREACH if the peer has
 *         failed, or the result of mca_oob_tcp_resolve() when address
 *         resolution had to be kicked off.
 */
int mca_oob_tcp_peer_send(mca_oob_tcp_peer_t* peer, mca_oob_tcp_msg_t* msg)
{
int rc = ORTE_SUCCESS;
OPAL_THREAD_LOCK(&peer->peer_lock);
switch(peer->peer_state) {
case MCA_OOB_TCP_CONNECTING:
case MCA_OOB_TCP_CONNECT_ACK:
case MCA_OOB_TCP_CLOSED:
case MCA_OOB_TCP_RESOLVE:
/*
 * Not connected yet: queue the message and, if the connection was
 * closed, attempt to resolve the peer address.
 */
opal_list_append(&peer->peer_send_queue, (opal_list_item_t*)msg);
if(peer->peer_state == MCA_OOB_TCP_CLOSED) {
peer->peer_state = MCA_OOB_TCP_RESOLVE;
/* resolve may block/re-enter, so drop the lock before calling it;
 * note this path returns directly and skips the unlock below */
OPAL_THREAD_UNLOCK(&peer->peer_lock);
return mca_oob_tcp_resolve(peer);
}
break;
case MCA_OOB_TCP_FAILED:
/* peer is known unreachable; reject the send */
rc = ORTE_ERR_UNREACH;
break;
case MCA_OOB_TCP_CONNECTED:
/*
 * Connection is up: start the message now, or queue it if another
 * send is already in progress.
 */
if (NULL != peer->peer_send_msg) {
opal_list_append(&peer->peer_send_queue, (opal_list_item_t*)msg);
} else {
/* if the send does not complete immediately, park it as the
 * in-progress message and arm the send event to finish it later */
if(!mca_oob_tcp_msg_send_handler(msg, peer)) {
peer->peer_send_msg = msg;
opal_event_add(&peer->peer_send_event, 0);
} else {
/* send finished synchronously */
mca_oob_tcp_msg_complete(msg, &peer->peer_name);
}
}
break;
}
OPAL_THREAD_UNLOCK(&peer->peer_lock);
return rc;
}
示例5: oshmem_proc_pack
/*
 * Pack the OSHMEM-level description (name, architecture, hostname) of
 * each process in proclist into buf, under the global proc lock.
 *
 * This data may or may not be included in any subsequent modex
 * operation, so we include it here to ensure completion of a
 * connect/accept handshake.  See the ompi/mca/dpm framework for an
 * example of where and how this info is used.  Eventually, the callers
 * of this function should be reviewed to see if duplication of
 * communication can be reduced.
 *
 * Returns OSHMEM_SUCCESS, or the first pack error encountered
 * (which is also logged via ORTE_ERROR_LOG).
 */
int oshmem_proc_pack(oshmem_proc_t **proclist,
                     int proclistsize,
                     opal_buffer_t* buf)
{
    int idx;
    int rc;

    OPAL_THREAD_LOCK(&oshmem_proc_lock);
    for (idx = 0; idx < proclistsize; idx++) {
        oshmem_proc_t *proc = proclist[idx];

        rc = opal_dss.pack(buf, &(proc->super.proc_name), 1, ORTE_NAME);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
            return rc;
        }

        rc = opal_dss.pack(buf, &(proc->super.proc_arch), 1, OPAL_UINT32);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
            return rc;
        }

        rc = opal_dss.pack(buf, &(proc->super.proc_hostname), 1, OPAL_STRING);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
            return rc;
        }
    }
    OPAL_THREAD_UNLOCK(&oshmem_proc_lock);

    return OSHMEM_SUCCESS;
}
示例6: mca_btl_tcp_proc_destruct
/*
 * Destructor for a TCP BTL proc: detach it from the component-wide
 * proc table, drop the reference on its opal proc, then free the
 * per-proc endpoint and address storage and destroy the lock.
 */
void mca_btl_tcp_proc_destruct(mca_btl_tcp_proc_t* tcp_proc)
{
    if (NULL != tcp_proc->proc_opal) {
        /* The proc table is shared component state — remove this
         * instance under the component lock. */
        OPAL_THREAD_LOCK(&mca_btl_tcp_component.tcp_lock);
        opal_proc_table_remove_value(&mca_btl_tcp_component.tcp_procs,
                                     tcp_proc->proc_opal->proc_name);
        OPAL_THREAD_UNLOCK(&mca_btl_tcp_component.tcp_lock);

        OBJ_RELEASE(tcp_proc->proc_opal);
        tcp_proc->proc_opal = NULL;
    }

    /* Release per-proc resources. */
    if (NULL != tcp_proc->proc_endpoints) {
        free(tcp_proc->proc_endpoints);
    }
    if (NULL != tcp_proc->proc_addrs) {
        free(tcp_proc->proc_addrs);
    }
    OBJ_DESTRUCT(&tcp_proc->proc_lock);
}
示例7: orte_pls_base_orted_cancel_operation
/*
 * Abort an in-flight orted command: cancel the outstanding ACK
 * receive, record a "silent" cancellation status, and wake anyone
 * waiting on the command condition variable.
 *
 * Always returns ORTE_SUCCESS.
 */
int orte_pls_base_orted_cancel_operation(void)
{
    /* All command state is guarded by the orted command lock. */
    OPAL_THREAD_LOCK(&orte_pls_base.orted_cmd_lock);

    /* We no longer want to hear the daemons' acknowledgment. */
    orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORTE_RML_TAG_PLS_ORTED_ACK);

    /* ORTE_ERR_SILENT marks the cancellation without printing anything. */
    completion_status = ORTE_ERR_SILENT;

    /* Declare us "done" so waiters can exit cleanly. */
    opal_condition_signal(&orte_pls_base.orted_cmd_cond);

    OPAL_THREAD_UNLOCK(&orte_pls_base.orted_cmd_lock);
    return ORTE_SUCCESS;
}
示例8: finalize
/*
 * Tear down the orted IOF component: drain and destruct the sink and
 * proc lists, cancel the proxy RML receive, then destroy the lock.
 * Returns the result of the receive cancellation.
 */
static int finalize(void)
{
    opal_list_item_t *entry;
    int rc;

    OPAL_THREAD_LOCK(&mca_iof_orted_component.lock);

    /* Release every queued sink, then the list container itself. */
    for (entry = opal_list_remove_first(&mca_iof_orted_component.sinks);
         NULL != entry;
         entry = opal_list_remove_first(&mca_iof_orted_component.sinks)) {
        OBJ_RELEASE(entry);
    }
    OBJ_DESTRUCT(&mca_iof_orted_component.sinks);

    /* Same for the tracked procs. */
    for (entry = opal_list_remove_first(&mca_iof_orted_component.procs);
         NULL != entry;
         entry = opal_list_remove_first(&mca_iof_orted_component.procs)) {
        OBJ_RELEASE(entry);
    }
    OBJ_DESTRUCT(&mca_iof_orted_component.procs);

    /* Cancel the RML receive so no further proxy traffic arrives. */
    rc = orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORTE_RML_TAG_IOF_PROXY);

    OPAL_THREAD_UNLOCK(&mca_iof_orted_component.lock);
    OBJ_DESTRUCT(&mca_iof_orted_component.lock);
    return rc;
}
示例9: ompi_free_list_resize
/**
 * Grow the free list until it contains at least the specified number
 * of elements.  The new elements are not created in a single memory
 * segment; instead we repeatedly allocate fl_num_per_alloc elements
 * at a time until we reach the required number (or the maximum allowed
 * by the list's initialization — presumably enforced inside
 * ompi_free_list_grow; confirm there).
 *
 * @param flist  the free list to resize
 * @param size   minimum total number of allocated elements desired
 * @return OMPI_SUCCESS, or the first error from ompi_free_list_grow
 */
int
ompi_free_list_resize(ompi_free_list_t* flist, size_t size)
{
ssize_t inc_num;
int ret = OMPI_SUCCESS;
/* Fast path: already large enough — checked without the lock;
 * the value is re-read under the lock below anyway. */
if (flist->fl_num_allocated > size) {
return OMPI_SUCCESS;
}
OPAL_THREAD_LOCK(&((flist)->fl_lock));
/* Keep growing in fl_num_per_alloc chunks until the deficit is gone
 * or a grow attempt fails. */
inc_num = size - flist->fl_num_allocated;
while( inc_num > 0 ) {
ret = ompi_free_list_grow(flist, flist->fl_num_per_alloc);
if( OMPI_SUCCESS != ret ) break;
inc_num = size - flist->fl_num_allocated;
}
OPAL_THREAD_UNLOCK(&((flist)->fl_lock));
return ret;
}
示例10: mca_io_romio_file_iread_at
/*
 * Non-blocking read at an explicit file offset.
 *
 * Forwards to ROMIO's MPI_File_iread_at; all ROMIO calls in this
 * component are serialized through the global mca_io_romio_mutex.
 * Returns whatever the ROMIO call returns.
 */
int
mca_io_romio_file_iread_at (ompi_file_t *fh,
                            MPI_Offset offset,
                            void *buf,
                            int count,
                            struct ompi_datatype_t *datatype,
                            ompi_request_t **request)
{
    mca_io_romio_data_t *data =
        (mca_io_romio_data_t *) fh->f_io_selected_data;
    int rc;

    OPAL_THREAD_LOCK (&mca_io_romio_mutex);
    rc = ROMIO_PREFIX(MPI_File_iread_at) (data->romio_fh, offset, buf,
                                          count, datatype, request);
    OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
    return rc;
}
示例11: mca_io_romio314_file_get_view
/*
 * Retrieve the file view (displacement, etype, filetype, data
 * representation) for an open file.
 *
 * Forwards to ROMIO's MPI_File_get_view; all ROMIO calls in this
 * component are serialized through the global mca_io_romio314_mutex.
 * Returns whatever the ROMIO call returns.
 */
int
mca_io_romio314_file_get_view (ompi_file_t *fh,
                               MPI_Offset * disp,
                               struct ompi_datatype_t ** etype,
                               struct ompi_datatype_t ** filetype,
                               char *datarep)
{
    mca_io_romio314_data_t *data =
        (mca_io_romio314_data_t *) fh->f_io_selected_data;
    int rc;

    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    rc = ROMIO_PREFIX(MPI_File_get_view) (data->romio_fh, disp, etype,
                                          filetype, datarep);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);
    return rc;
}
示例12: ompi_proc_find
/*
 * Look up the proc structure whose name matches the given jobid + vpid.
 * The global proc list is scanned under ompi_proc_lock.
 *
 * @param name  process name to match (jobid and vpid fields compared)
 * @return the matching proc, or NULL if none is on the list
 */
ompi_proc_t * ompi_proc_find ( const ompi_process_name_t * name )
{
    ompi_proc_t *match = NULL;
    ompi_proc_t *candidate;
    ompi_rte_cmp_bitmask_t fields = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;

    OPAL_THREAD_LOCK(&ompi_proc_lock);
    candidate = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
    while (candidate != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list)) {
        if (OPAL_EQUAL == ompi_rte_compare_name_fields(fields,
                                                       &candidate->proc_name,
                                                       name)) {
            match = candidate;
            break;
        }
        candidate = (ompi_proc_t*)opal_list_get_next(candidate);
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);

    return match;
}
示例13: create_send_tag
/*
 * Produce the next send tag for a pt2pt one-sided module by advancing
 * the module's tag counter modulo the PML's maximum tag.
 *
 * Two compile-time paths: a lock-free compare-and-swap loop when atomic
 * CAS support is available, otherwise increment under the module lock.
 */
static inline int32_t
create_send_tag(ompi_osc_pt2pt_module_t *module)
{
#if OMPI_HAVE_THREAD_SUPPORT && OPAL_HAVE_ATOMIC_CMPSET_32
/* Lock-free path: retry until the CAS installs our increment
 * (assumes opal_atomic_cmpset_32 returns non-zero on success —
 * standard opal atomics convention; confirm against opal/sys). */
int32_t newval, oldval;
do {
oldval = module->p2p_tag_counter;
newval = (oldval + 1) % mca_pml.pml_max_tag;
} while (0 == opal_atomic_cmpset_32(&module->p2p_tag_counter, oldval, newval));
return newval;
#else
int32_t ret;
/* no compare and swap - have to lock the module */
OPAL_THREAD_LOCK(&module->p2p_lock);
module->p2p_tag_counter = (module->p2p_tag_counter + 1) % mca_pml.pml_max_tag;
ret = module->p2p_tag_counter;
OPAL_THREAD_UNLOCK(&module->p2p_lock);
return ret;
#endif
}
示例14: mca_oob_ud_req_is_in_list
/*
 * Return true iff req currently appears on the given list.
 * The scan is performed under the UD match lock so the list cannot
 * change while we walk it.
 */
bool mca_oob_ud_req_is_in_list (mca_oob_ud_req_t *req, opal_list_t *list)
{
    opal_list_item_t *needle = (opal_list_item_t *) req;
    opal_list_item_t *cursor;
    bool found = false;

    OPAL_THREAD_LOCK(&mca_oob_ud_component.ud_match_lock);
    for (cursor = opal_list_get_first (list);
         cursor != opal_list_get_end (list);
         cursor = opal_list_get_next (cursor)) {
        if (needle == cursor) {
            found = true;
            break;
        }
    }
    OPAL_THREAD_UNLOCK(&mca_oob_ud_component.ud_match_lock);

    return found;
}
示例15: orte_wait_cb_cancel
/*
 * Cancel a previously registered exit callback for process wpid.
 *
 * @param wpid  pid whose callback registration should be removed
 * @return ORTE_SUCCESS when a registration was found and released,
 *         ORTE_ERR_BAD_PARAM when no callback was registered for wpid
 */
int
orte_wait_cb_cancel(pid_t wpid)
{
    opal_process_handle_t* pending;

    OPAL_THREAD_LOCK(&mutex);
    /* Do we have any registered callback for this particular pid?
     * BUGFIX: the list argument had been mangled to "®istered_cb" (an
     * HTML "&reg" entity); restored to &registered_cb. */
    pending = find_pending_cb( wpid, false );
    if( NULL != pending ) {
        opal_list_remove_item( &registered_cb, (opal_list_item_t*)pending );
        OBJ_RELEASE( pending );
        OPAL_THREAD_UNLOCK(&mutex);
        return ORTE_SUCCESS;
    }
    OPAL_THREAD_UNLOCK(&mutex);
    return ORTE_ERR_BAD_PARAM;
}