This article collects typical usage examples of the C++ MUTEX_ENTER function, gathered to show how MUTEX_ENTER is used in real code.
Below are 15 code examples of MUTEX_ENTER, ordered by popularity.
Example 1: afs_tp_start
/**
 * start a thread pool.
 *
 * @param[in] pool  thread pool object
 *
 * @return operation status
 *  @retval 0 success
 *  @retval AFS_TP_ERROR thread create failure
 */
int
afs_tp_start(struct afs_thread_pool * pool)
{
    int code, ret = 0;
    struct afs_thread_pool_worker * worker;
    afs_uint32 i;

    MUTEX_ENTER(&pool->lock);
    if (pool->state != AFS_TP_STATE_INIT) {
        ret = AFS_TP_ERROR;
        goto done_sync;
    }
    pool->state = AFS_TP_STATE_STARTING;
    MUTEX_EXIT(&pool->lock);

    for (i = 0; i < pool->max_threads; i++) {
        code = _afs_tp_worker_start(pool, &worker);
        if (code) {
            ret = code;
        }
    }

    MUTEX_ENTER(&pool->lock);
    pool->state = AFS_TP_STATE_RUNNING;

 done_sync:
    MUTEX_EXIT(&pool->lock);
    return ret;
}
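
The idiom here is worth calling out: the pool lock is held only around the state checks and transitions, and dropped while the slow worker spawning happens. Below is a minimal, self-contained sketch of the same idiom using plain pthreads; pool_t, start_worker, and the locally defined MUTEX_ENTER/MUTEX_EXIT macros are illustrative stand-ins, not the OpenAFS API.

#include <pthread.h>

/* local stand-ins for the MUTEX_ENTER/MUTEX_EXIT macros used above */
#define MUTEX_ENTER(m) pthread_mutex_lock(m)
#define MUTEX_EXIT(m)  pthread_mutex_unlock(m)

enum pool_state { POOL_INIT, POOL_STARTING, POOL_RUNNING };

struct pool_t {
    pthread_mutex_t lock;
    enum pool_state state;
};

/* hypothetical worker launcher; always succeeds in this sketch */
static int start_worker(struct pool_t *p) { (void)p; return 0; }

int pool_start(struct pool_t *p, int nthreads)
{
    int i, ret = 0;

    MUTEX_ENTER(&p->lock);
    if (p->state != POOL_INIT) {   /* refuse to start twice */
        MUTEX_EXIT(&p->lock);
        return -1;
    }
    p->state = POOL_STARTING;      /* claim the transition... */
    MUTEX_EXIT(&p->lock);          /* ...then drop the lock for the slow part */

    for (i = 0; i < nthreads; i++)
        if (start_worker(p))
            ret = -1;

    MUTEX_ENTER(&p->lock);         /* reacquire only to publish the result */
    p->state = POOL_RUNNING;
    MUTEX_EXIT(&p->lock);
    return ret;
}

Publishing POOL_STARTING before unlocking is what stops a second concurrent pool_start from slipping past the POOL_INIT check.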
Example 2: _afs_tp_worker_run
/**
 * low-level thread entry point.
 *
 * @param[in] rock  opaque pointer to thread worker object
 *
 * @return opaque return pointer from pool entry function
 *
 * @internal
 */
static void *
_afs_tp_worker_run(void * rock)
{
    struct afs_thread_pool_worker * worker = rock;
    struct afs_thread_pool * pool = worker->pool;

    /* register worker with pool */
    MUTEX_ENTER(&pool->lock);
    queue_Append(&pool->thread_list, worker);
    pool->nthreads++;
    MUTEX_EXIT(&pool->lock);

    /* call high-level entry point */
    worker->ret = (*pool->entry)(pool, worker, pool->work_queue, pool->rock);

    /* adjust pool live thread count */
    MUTEX_ENTER(&pool->lock);
    osi_Assert(pool->nthreads);
    queue_Remove(worker);
    pool->nthreads--;
    if (!pool->nthreads) {
        CV_BROADCAST(&pool->shutdown_cv);
        pool->state = AFS_TP_STATE_STOPPED;
    }
    MUTEX_EXIT(&pool->lock);

    _afs_tp_worker_free(worker);

    return NULL;
}
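
The deregistration path shows the usual last-worker-out pattern: whichever thread drops the live count to zero broadcasts on the shutdown condition variable so a waiting joiner can proceed. A hedged sketch of just that handshake, with invented pthread-based names (worker_exit and pool_wait_stopped are not OpenAFS functions):

#include <pthread.h>

struct pool_t {
    pthread_mutex_t lock;
    pthread_cond_t  shutdown_cv;
    int             nthreads;   /* live worker count */
};

/* called by each worker as it exits */
void worker_exit(struct pool_t *p)
{
    pthread_mutex_lock(&p->lock);
    if (--p->nthreads == 0)
        pthread_cond_broadcast(&p->shutdown_cv);  /* wake the joiner */
    pthread_mutex_unlock(&p->lock);
}

/* called by the thread waiting for the pool to drain */
void pool_wait_stopped(struct pool_t *p)
{
    pthread_mutex_lock(&p->lock);
    while (p->nthreads > 0)                       /* re-check: spurious wakeups */
        pthread_cond_wait(&p->shutdown_cv, &p->lock);
    pthread_mutex_unlock(&p->lock);
}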
Example 3: SalvageLogCleanupThread
/*
 * thread to combine salvager child logs
 * back into the main salvageserver log
 */
static void *
SalvageLogCleanupThread(void * arg)
{
    struct log_cleanup_node * cleanup;

    MUTEX_ENTER(&worker_lock);

    while (1) {
        while (queue_IsEmpty(&log_cleanup_queue)) {
            CV_WAIT(&log_cleanup_queue.queue_change_cv, &worker_lock);
        }

        while (queue_IsNotEmpty(&log_cleanup_queue)) {
            cleanup = queue_First(&log_cleanup_queue, log_cleanup_node);
            queue_Remove(cleanup);
            MUTEX_EXIT(&worker_lock);
            SalvageLogCleanup(cleanup->pid);
            free(cleanup);
            MUTEX_ENTER(&worker_lock);
        }
    }

    MUTEX_EXIT(&worker_lock);   /* not reached */
    return NULL;
}
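
Two things are worth noting: the lock is dropped around SalvageLogCleanup(), which does the slow log-merging I/O, and reacquired before the queue is touched again; and because the outer while (1) never exits, the trailing MUTEX_EXIT and return NULL are unreachable, existing only to keep the function well-formed.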
Example 4: VLockFileLock
/**
 * lock a file on disk for the process.
 *
 * @param[in] lf        the struct VLockFile representing the file to lock
 * @param[in] offset    the offset in the file to lock
 * @param[in] locktype  READ_LOCK or WRITE_LOCK
 * @param[in] nonblock  0 to wait for conflicting locks to clear before
 *                      obtaining the lock; 1 to fail immediately if a
 *                      conflicting lock is held by someone else
 *
 * @return operation status
 *  @retval 0 success
 *  @retval EBUSY someone else is holding a conflicting lock and nonblock=1
 *                was specified
 *  @retval EIO error acquiring file lock
 *
 * @note DAFS only
 *
 * @note do not try to lock/unlock the same offset in the same file from
 *       different threads; use VGetDiskLock to protect threads from each
 *       other in addition to other processes
 */
int
VLockFileLock(struct VLockFile *lf, afs_uint32 offset, int locktype,
              int nonblock)
{
    int code;

    osi_Assert(locktype == READ_LOCK || locktype == WRITE_LOCK);

    MUTEX_ENTER(&lf->mutex);

    if (lf->fd == INVALID_FD) {
        lf->fd = _VOpenPath(lf->path);
        if (lf->fd == INVALID_FD) {
            MUTEX_EXIT(&lf->mutex);
            return EIO;
        }
    }

    lf->refcount++;

    MUTEX_EXIT(&lf->mutex);

    code = _VLockFd(lf->fd, offset, locktype, nonblock);

    if (code) {
        MUTEX_ENTER(&lf->mutex);
        if (--lf->refcount < 1) {
            _VCloseFd(lf->fd);
            lf->fd = INVALID_FD;
        }
        MUTEX_EXIT(&lf->mutex);
    }

    return code;
}
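
Two details matter: the reference count is bumped under the mutex before the slow _VLockFd call, so another thread cannot close the fd out from under us, and on failure the count is rolled back, with the last reference closing the descriptor. A compilable sketch of that pin/rollback shape, with invented names (handle_t, open_resource, use_resource, close_resource are illustrative stubs, not the OpenAFS API):

#include <errno.h>
#include <pthread.h>

struct handle_t {
    pthread_mutex_t mutex;
    int             fd;        /* -1 when closed */
    int             refcount;
};

static int  open_resource(void)    { return 3; }  /* pretend to open an fd */
static int  use_resource(int fd)   { (void)fd; return 0; }
static void close_resource(int fd) { (void)fd; }

int handle_use(struct handle_t *h)
{
    int code;

    pthread_mutex_lock(&h->mutex);
    if (h->fd < 0 && (h->fd = open_resource()) < 0) {
        pthread_mutex_unlock(&h->mutex);
        return EIO;
    }
    h->refcount++;                   /* pin the fd before dropping the lock */
    pthread_mutex_unlock(&h->mutex);

    code = use_resource(h->fd);      /* the slow call runs unlocked */

    if (code) {
        /* failure: roll back our reference; the last one out closes the fd */
        pthread_mutex_lock(&h->mutex);
        if (--h->refcount < 1) {
            close_resource(h->fd);
            h->fd = -1;
        }
        pthread_mutex_unlock(&h->mutex);
    }
    return code;   /* on success the caller drops the reference later */
}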
Example 5: MUTEX_ENTER
void
OMR::Monitor::enter()
{
#ifdef WIN32
    MUTEX_ENTER(_monitor);
#else
    int32_t rc = MUTEX_ENTER(_monitor);
    TR_ASSERT(rc == 0, "error locking monitor\n");
#endif
}
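
The asymmetry between the two branches falls out of the underlying primitives: on Windows the critical-section call returns nothing useful, while on POSIX pthread_mutex_lock returns an error code worth asserting on. The following is a plausible shape for such a macro, written as an assumption for illustration rather than the actual OMR definition:

#if defined(WIN32)
#include <windows.h>
typedef CRITICAL_SECTION *MUTEX;
/* EnterCriticalSection returns void, so there is no rc to check */
#define MUTEX_ENTER(m) EnterCriticalSection(m)
#else
#include <pthread.h>
typedef pthread_mutex_t *MUTEX;
/* pthread_mutex_lock returns 0 on success or an errno value on failure */
#define MUTEX_ENTER(m) pthread_mutex_lock(m)
#endif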
Example 6: defined
void
OMR::Monitor::enter()
{
#if defined(OMR_OS_WINDOWS)
    MUTEX_ENTER(_monitor);
#else
    int32_t rc = MUTEX_ENTER(_monitor);
    TR_ASSERT(rc == 0, "error locking monitor\n");
#endif /* defined(OMR_OS_WINDOWS) */
}
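
Examples 5 and 6 are the same OMR::Monitor::enter taken from what appear to be two different OMR revisions; the locking logic and assertion are identical, and only the platform guard differs (WIN32 versus the namespaced OMR_OS_WINDOWS).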
Example 7: SalvageChildReaperThread
static void *
SalvageChildReaperThread(void * args)
{
    int slot, pid, status;
    struct log_cleanup_node * cleanup;

    MUTEX_ENTER(&worker_lock);

    /* loop reaping our children */
    while (1) {
        /* wait() won't block unless we have children, so
         * block on the cond var if we're childless */
        while (current_workers == 0) {
            CV_WAIT(&worker_cv, &worker_lock);
        }

        MUTEX_EXIT(&worker_lock);

        cleanup = (struct log_cleanup_node *) malloc(sizeof(struct log_cleanup_node));

        while (Reap_Child("salvageserver", &pid, &status) < 0) {
            /* try to prevent livelock if something goes wrong */
            sleep(1);
        }

        VOL_LOCK;
        for (slot = 0; slot < Parallel; slot++) {
            if (child_slot[slot] == pid)
                break;
        }
        osi_Assert(slot < Parallel);
        child_slot[slot] = 0;
        VOL_UNLOCK;

        SALVSYNC_doneWorkByPid(pid, status);

        MUTEX_ENTER(&worker_lock);

        if (cleanup) {
            cleanup->pid = pid;
            queue_Append(&log_cleanup_queue, cleanup);
            CV_SIGNAL(&log_cleanup_queue.queue_change_cv);
        }

        /* ok, we've reaped a child */
        current_workers--;
        CV_BROADCAST(&worker_cv);
    }

    return NULL;
}
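
This is the producer half of the queue consumed in Example 3: after reaping a child with the lock dropped, the thread re-enters worker_lock, appends a log_cleanup_node, and signals queue_change_cv to wake SalvageLogCleanupThread. The if (cleanup) guard means a failed malloc silently skips log cleanup for that child rather than crashing the reaper.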
Example 8: valgrindFreeObject
void valgrindFreeObject(MM_GCExtensionsBase *extensions, uintptr_t baseAddress)
{
    int objSize;
    if (MM_ForwardedHeader((omrobjectptr_t)baseAddress).isForwardedPointer())
    {
        /* In the scavenger an object may act as a pointer to another object
           (its replica in another region). In this case,
           getConsumedSizeInBytesWithHeader returns a junk value, so instead
           we calculate the size of the object (replica) it points to and use
           that to free the original object. */
        omrobjectptr_t fwObject = MM_ForwardedHeader((omrobjectptr_t)baseAddress).getForwardedObject();
        objSize = (int)((GC_ObjectModel)extensions->objectModel).getConsumedSizeInBytesWithHeader(fwObject);
    }
    else
    {
        objSize = (int)((GC_ObjectModel)extensions->objectModel).getConsumedSizeInBytesWithHeader((omrobjectptr_t)baseAddress);
    }

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Clearing an object at 0x%lx of size %d\n", baseAddress, objSize);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    VALGRIND_CHECK_MEM_IS_DEFINED(baseAddress, objSize);
    VALGRIND_MEMPOOL_FREE(extensions->valgrindMempoolAddr, baseAddress);

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    hashTableRemove(extensions->memcheckHashTable, &baseAddress);
    MUTEX_EXIT(extensions->memcheckHashTableMutex);
}
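
Two details differ from the AFS examples: the mutex is passed without an address-of operator, which suggests the OMR MUTEX type here is already a handle (or the macro takes the address internally); and the lock covers only the bookkeeping hash table, while the Valgrind client requests above it run unlocked.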
Example 9: multi_Select
/* Return the user's connection index of the most recently ready call;
 * that is, a call that has received at least one reply packet */
int
multi_Select(struct multi_handle *mh)
{
    int index;
    SPLVAR;
    NETPRI;
#ifdef RX_ENABLE_LOCKS
    MUTEX_ENTER(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
    while (mh->nextReady == mh->firstNotReady) {
        if (mh->nReady == mh->nConns) {
#ifdef RX_ENABLE_LOCKS
            MUTEX_EXIT(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
            USERPRI;
            return -1;
        }
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&mh->cv, &mh->lock);
#else /* RX_ENABLE_LOCKS */
        osi_rxSleep(mh);
#endif /* RX_ENABLE_LOCKS */
    }
    index = *(mh->nextReady);
    (mh->nextReady) += 1;
#ifdef RX_ENABLE_LOCKS
    MUTEX_EXIT(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
    USERPRI;
    return index;
}
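
Rx's locking is compile-time optional: with RX_ENABLE_LOCKS unset the mutex and condition-variable calls vanish and the osi_rxSleep primitive takes their place. A minimal sketch of that configuration trick, under invented names (ENABLE_LOCKS, LOCK_T, LOCK, UNLOCK are hypothetical, not the Rx macros):

/* hypothetical feature switch, in the spirit of RX_ENABLE_LOCKS */
#ifdef ENABLE_LOCKS
#include <pthread.h>
#define LOCK_T    pthread_mutex_t
#define LOCK(m)   pthread_mutex_lock(m)
#define UNLOCK(m) pthread_mutex_unlock(m)
#else
/* single-threaded build: locking compiles away entirely */
#define LOCK_T    int
#define LOCK(m)   ((void)0)
#define UNLOCK(m) ((void)0)
#endif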
Example 10: _tdispInit
void _tdispInit(void) {
    MUTEX_INIT();
    MUTEX_ENTER();
    tdisp_lld_init();
    MUTEX_LEAVE();
}
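
Examples 10 and 14 come from the same display-driver module, where MUTEX_ENTER takes no argument at all: the module owns a single private mutex, created once by MUTEX_INIT() in _tdispInit(), and each public entry point simply brackets its low-level tdisp_lld_* call with MUTEX_ENTER()/MUTEX_LEAVE().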
Example 11: hxge_hw_init_niu_common
void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
    p_hxge_hw_list_t hw_p;

    HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

    if ((hw_p = hxgep->hxge_hw_p) == NULL) {
        return;
    }

    MUTEX_ENTER(&hw_p->hxge_cfg_lock);
    if (hw_p->flags & COMMON_INIT_DONE) {
        HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
            " already done for dip $%p exiting", hw_p->parent_devp));
        MUTEX_EXIT(&hw_p->hxge_cfg_lock);
        return;
    }
    hw_p->flags = COMMON_INIT_START;
    HXGE_DEBUG_MSG((hxgep, MOD_CTL,
        "hxge_hw_init_niu_common Started for device id %x",
        hw_p->parent_devp));

    (void) hxge_pfc_hw_reset(hxgep);
    hw_p->flags = COMMON_INIT_DONE;
    MUTEX_EXIT(&hw_p->hxge_cfg_lock);

    HXGE_DEBUG_MSG((hxgep, MOD_CTL,
        "hxge_hw_init_niu_common Done for device id %x",
        hw_p->parent_devp));
    HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}
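
The flags dance is a lock-protected run-once guard: the first caller through takes the config lock, does the hardware reset, and sets COMMON_INIT_DONE; every later caller sees the flag and returns early. A distilled sketch with made-up names (cfg_lock, init_done, hw_reset are illustrative):

#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int init_done = 0;

static void hw_reset(void) { /* hypothetical one-time setup */ }

void init_common(void)
{
    pthread_mutex_lock(&cfg_lock);
    if (init_done) {                  /* someone already did the work */
        pthread_mutex_unlock(&cfg_lock);
        return;
    }
    hw_reset();                       /* runs exactly once, under the lock */
    init_done = 1;
    pthread_mutex_unlock(&cfg_lock);
}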
Example 12: canWrite
afs_int32
canWrite(int fid)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif
    extern dumpSyncP dumpSyncPtr;

    ObtainWriteLock(&dumpSyncPtr->ds_lock);

    /* let the pipe drain */
    while (dumpSyncPtr->ds_bytes > 0) {
        if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
            dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
            CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
#else
            code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
            if (code)
                LogError(code, "canWrite: Signal delivery failed\n");
#endif
        }
        dumpSyncPtr->ds_writerStatus = DS_WAITING;
        ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
        MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
        CV_WAIT(&dumpSyncPtr->ds_writerStatus_cond,
                &dumpSyncPtr->ds_writerStatus_mutex);
        MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
        LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
#endif
        ObtainWriteLock(&dumpSyncPtr->ds_lock);
    }
    return (1);
}
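
Here MUTEX_ENTER exists only to satisfy the condition-variable contract: a pthread condvar must be waited on under a mutex, so the writer drops the higher-level ds_lock, takes the dedicated ds_writerStatus_mutex just long enough for CV_WAIT, then reacquires ds_lock to re-test ds_bytes at the top of the loop.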
Example 13: afs_cv_timedwait
void
afs_cv_timedwait(afs_kcondvar_t * cv, afs_kmutex_t * l, int waittime)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    long t = waittime * HZ / 1000;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    seq = cv->seq;

    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
        AFS_GUNLOCK();
    MUTEX_EXIT(l);

    while (seq == cv->seq) {
        t = schedule_timeout(t);
        if (!t)     /* timeout */
            break;
    }

    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (isAFSGlocked)
        AFS_GLOCK();
    MUTEX_ENTER(l);
}
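
This example is the inverse of the others: rather than calling a condition-variable API, it implements a timed condvar wait for the Linux kernel port on top of waitqueues. The sequence counter cv->seq is sampled before MUTEX_EXIT(l), and the sleep loop ends as soon as a signaller bumps it or the timeout lapses; registering on the waitqueue before dropping the mutex is what closes the lost-wakeup window, and MUTEX_ENTER(l) at the end preserves the usual condvar contract of returning with the mutex held.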
Example 14: tdispSetBacklight
void tdispSetBacklight(uint16_t percentage) {
    if (percentage > 100)
        percentage = 100;

    MUTEX_ENTER();
    tdisp_lld_set_backlight(percentage);
    MUTEX_LEAVE();
}
Example 15: valgrindClearRange
void valgrindClearRange(MM_GCExtensionsBase *extensions, uintptr_t baseAddress, uintptr_t size)
{
    if (size == 0)
    {
        return;
    }
    uintptr_t topInclusiveAddr = baseAddress + size - 1;

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Clearing objects in range b/w 0x%lx and 0x%lx\n", baseAddress, topInclusiveAddr);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    GC_HashTableIterator it(extensions->memcheckHashTable);
    uintptr_t *currentSlotPointer = (uintptr_t *)it.nextSlot();
    while (currentSlotPointer != NULL)
    {
        if (baseAddress <= *currentSlotPointer && topInclusiveAddr >= *currentSlotPointer)
        {
            valgrindFreeObjectDirect(extensions, *currentSlotPointer);
            it.removeSlot();
        }
        currentSlotPointer = (uintptr_t *)it.nextSlot();
    }
    MUTEX_EXIT(extensions->memcheckHashTableMutex);

    /* Valgrind automatically marks freed objects as noaccess; we still mark
       the entire region as noaccess to cover any left-over areas. */
    valgrindMakeMemNoaccess(baseAddress, size);
}
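
The entire iterate-and-remove pass runs with memcheckHashTableMutex held, which is what makes it safe against a concurrent valgrindFreeObject (Example 8) mutating the same table; removals go through the iterator's removeSlot() so the traversal itself is never invalidated mid-walk.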