本文整理汇总了C++中MUTEX_HELD函数的典型用法代码示例。如果您正苦于以下问题:C++ MUTEX_HELD函数的具体用法?C++ MUTEX_HELD怎么用?C++ MUTEX_HELD使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MUTEX_HELD函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: __zvol_remove_minor
/*
 * Tear down the in-kernel state of the zvol named `name'.  Returns
 * ENXIO when no such zvol exists and EBUSY while it is still open.
 * Caller must hold zvol_state_lock.
 */
static int
__zvol_remove_minor(const char *name)
{
	zvol_state_t *zvp;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zvp = zvol_find_by_name(name);
	if (zvp == NULL)
		return (ENXIO);

	if (zvp->zv_open_count > 0)
		return (EBUSY);

	zvol_remove(zvp);
	zvol_free(zvp);

	return (0);
}
示例2: xnbo_mcast_find
/*
* Find the multicast address `addr', return B_TRUE if it is one that
* we receive. If `remove', remove it from the set received.
*/
static boolean_t
xnbo_mcast_find(xnb_t *xnbp, ether_addr_t *addr, boolean_t remove)
{
	xnbo_t *xnbop = xnbp->xnb_flavour_data;
	xmca_t *prev = NULL;
	xmca_t *found = NULL;
	xmca_t *cur;

	ASSERT(MUTEX_HELD(&xnbp->xnb_state_lock));
	ASSERT(xnbop->o_promiscuous == B_FALSE);

	for (cur = xnbop->o_mca; cur != NULL; prev = cur, cur = cur->next) {
		if (bcmp(&cur->addr, addr, sizeof (cur->addr)) != 0)
			continue;

		found = cur;
		if (remove) {
			/* Unlink from the singly-linked set. */
			if (prev == NULL)
				xnbop->o_mca = cur->next;
			else
				prev->next = cur->next;
		}
		break;
	}

	if (found == NULL)
		return (B_FALSE);

	if (remove) {
		DTRACE_PROBE3(mcast_remove,
		    (char *), "remove",
		    (void *), xnbp,
		    (etheraddr_t *), found->addr);
		mac_multicast_remove(xnbop->o_mch, found->addr);
		kmem_free(found, sizeof (*found));
	}

	return (B_TRUE);
}
示例3: zfsctl_snapshot_rename
/*
* Rename a zfs_snapentry_t in the zfs_snapshots_by_name. The structure is
* removed, renamed, and added back to the new correct location in the tree.
*/
static int
zfsctl_snapshot_rename(char *old_snapname, char *new_snapname)
{
	zfs_snapentry_t *entry;

	ASSERT(MUTEX_HELD(&zfs_snapshot_lock));

	entry = zfsctl_snapshot_find_by_name(old_snapname);
	if (entry == NULL)
		return (ENOENT);

	/* Re-key: pull out of the tree, swap the name, reinsert. */
	zfsctl_snapshot_remove(entry);
	strfree(entry->se_name);
	entry->se_name = strdup(new_snapname);
	zfsctl_snapshot_add(entry);
	zfsctl_snapshot_rele(entry);

	return (0);
}
示例4: port_remove_fd_object
/*
* The port_remove_fd_object() function frees all resources associated with
* delivered portfd_t structure. Returns 1 if the port_kevent was found
* and removed from the port queue.
*/
int
port_remove_fd_object(portfd_t *pfd, port_t *pp, port_fdcache_t *pcp)
{
	port_queue_t *portq;
	polldat_t *pdp = PFTOD(pfd);
	port_kevent_t *pkevp;
	int error;
	int removed = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	/* Detach from the pollhead so no further events are posted for us. */
	if (pdp->pd_php != NULL) {
		pollhead_delete(pdp->pd_php, pdp);
		pdp->pd_php = NULL;
	}
	pkevp = pdp->pd_portev;
	portq = &pp->port_queue;

	/* Block the port queue while we pull our event off it. */
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		if (portq->portq_getn && portq->portq_tnent) {
			/*
			 * move events from the temporary "get" queue
			 * back to the port queue
			 */
			port_push_eventq(portq);
		}
		/* cleanup merged port queue */
		port_remove_event_doneq(pkevp, portq);
		removed = 1;
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);

	/* Let the event owner know the association is being torn down. */
	if (pkevp->portkev_callback) {
		(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
		    &error, pkevp->portkev_pid, PORT_CALLBACK_DISSOCIATE,
		    pkevp);
	}
	port_free_event_local(pkevp, 0);

	/* remove polldat struct */
	port_pcache_remove_fd(pcp, pfd);
	return (removed);
}
示例5: oplmsu_search_min_stop_path
/*
* Search path of "offline:stop" status, and minimum path number
*
* Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
* -. uinst_t->lock : M [RW_READER or RW_WRITER]
* -. uinst_t->u_lock : M
* -. uinst_t->l_lock : P
* -. uinst_t->c_lock : P
*/
void
oplmsu_search_min_stop_path(void)
{
	upath_t *upath, *min_upath = NULL;
	lpath_t *lpath;
	int min_no = UNDEFINED;
	int active_flag = 0;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));

	upath = oplmsu_uinst->first_upath;
	while (upath) {
		if ((upath->status == MSU_PSTAT_ACTIVE) &&
		    (upath->traditional_status == MSU_ACTIVE)) {
			/* An active path exists; no candidate is needed. */
			active_flag = 1;
			break;
		} else if ((upath->status == MSU_PSTAT_STOP) &&
		    (upath->traditional_status == MSU_STOP)) {
			if (upath->lpath != NULL) {
				if ((min_no == UNDEFINED) ||
				    (upath->path_no < min_no)) {
					/* Track lowest-numbered usable path. */
					lpath = upath->lpath;
					mutex_enter(&oplmsu_uinst->l_lock);
					if (lpath->status == MSU_EXT_NOTUSED) {
						min_upath = upath;
						min_no = upath->path_no;
					}
					mutex_exit(&oplmsu_uinst->l_lock);
				}
			}
		}
		upath = upath->u_next;
	}

	/*
	 * BUG FIX: the original dereferenced min_upath unconditionally when
	 * no active path was found.  If no "offline:stop" path qualified
	 * either, min_upath was uninitialized and the dereference was
	 * undefined behavior.  Guard against that case.
	 */
	if ((active_flag == 0) && (min_upath != NULL)) {
		lpath = min_upath->lpath;
		mutex_enter(&oplmsu_uinst->l_lock);
		lpath->src_upath = NULL;
		lpath->status = MSU_EXT_ACTIVE_CANDIDATE;
		mutex_exit(&oplmsu_uinst->l_lock);
	}
}
示例6: oplmsu_check_lpath_usable
/*
* Check whether lower path is usable by lower path info table address
*
* Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
* -. uinst_t->lock : M [RW_READER or RW_WRITER]
* -. uinst_t->u_lock : A
* -. uinst_t->l_lock : M
* -. uinst_t->c_lock : P
*/
int
oplmsu_check_lpath_usable(void)
{
	lpath_t *lp;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));

	/* Any path with an in-flight handle makes the set busy. */
	for (lp = oplmsu_uinst->first_lpath; lp != NULL; lp = lp->l_next) {
		if ((lp->hndl_uqueue != NULL) || (lp->hndl_mp != NULL))
			return (BUSY);
	}

	return (SUCCESS);
}
示例7: zvol_find_minor
/*
* Find the next available range of ZVOL_MINORS minor numbers. The
* zvol_state_list is kept in ascending minor order so we simply need
* to scan the list for the first gap in the sequence. This allows us
* to recycle minor number as devices are created and removed.
*/
static int
zvol_find_minor(unsigned *minor)
{
	zvol_state_t *zv;

	*minor = 0;
	ASSERT(MUTEX_HELD(&zvol_state_lock));

	/*
	 * zvol_state_list is kept sorted by minor, so the first entry whose
	 * minor differs from the running counter marks a reusable gap.
	 */
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
		if (MINOR(zv->zv_dev) != MINOR(*minor))
			break;
	}

	/* All minors are in use */
	if (*minor >= (1 << MINORBITS))
		return (ENXIO);

	/*
	 * Consistency fix: use the parenthesized return style used by
	 * every other function in this file (e.g. __zvol_remove_minor).
	 */
	return (0);
}
示例8: cyclic_expand
/*
* cyclic_expand() will cross call onto the CPU to perform the actual
* expand operation.
*/
static void
cyclic_expand(cyc_cpu_t *cpu)
{
	cyc_index_t old_size = cpu->cyp_size;
	cyc_index_t new_size = old_size << 1;
	cyc_index_t *old_heap = cpu->cyp_heap;
	cyclic_t *old_cyclics = cpu->cyp_cyclics;
	cyc_index_t *fresh_heap;
	cyclic_t *fresh_cyclics;
	cyc_xcallarg_t arg;
	cyc_backend_t *be = cpu->cyp_backend;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (new_size == 0) {
		/* First expansion: start from the default per-CPU size. */
		new_size = CY_DEFAULT_PERCPU;
		ASSERT(old_heap == NULL && old_cyclics == NULL);
	}

	/*
	 * The new size must be a power of 2.
	 */
	ASSERT(((new_size - 1) & new_size) == 0);

	fresh_heap = malloc(sizeof (cyc_index_t) * new_size, M_CYCLIC,
	    M_WAITOK);
	fresh_cyclics = malloc(sizeof (cyclic_t) * new_size, M_CYCLIC,
	    M_ZERO | M_WAITOK);

	arg.cyx_cpu = cpu;
	arg.cyx_heap = fresh_heap;
	arg.cyx_cyclics = fresh_cyclics;
	arg.cyx_size = new_size;

	/* Cross call onto the target CPU to swap in the new arrays. */
	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_expand_xcall, &arg);

	/* The old arrays are only freed after the xcall has installed the new ones. */
	if (old_cyclics != NULL) {
		ASSERT(old_heap != NULL);
		ASSERT(old_size != 0);
		free(old_cyclics, M_CYCLIC);
		free(old_heap, M_CYCLIC);
	}
}
示例9: taskq_ent_alloc
/*
* taskq_ent_alloc()
*
* Allocates a new taskq_ent_t structure either from the free list or from the
* cache. Returns NULL if it can't be allocated.
*
* Assumes: tq->tq_lock is held.
*/
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
	int kmflags = KM_NOSLEEP;
	taskq_ent_t *tqe;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/*
	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
	 * we are below tq_minalloc.
	 */
	if ((tqe = tq->tq_freelist) != NULL &&
	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
		/* Pop the head of the freelist. */
		tq->tq_freelist = tqe->tqent_next;
	} else {
		if (flags & TQ_NOALLOC)
			return (NULL);

		/* Drop tq_lock across the cache allocation. */
		mutex_exit(&tq->tq_lock);
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (kmflags & KM_NOSLEEP) {
				/*
				 * NOTE(review): kmflags is fixed to
				 * KM_NOSLEEP above, so this branch is always
				 * taken and the delay(hz) throttle below is
				 * unreachable in this port -- confirm intent.
				 */
				mutex_enter(&tq->tq_lock);
				return (NULL);
			}
			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller. So, we just delay for one second
			 * to throttle the allocation rate.
			 */
			delay(hz);
		}
		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
		mutex_enter(&tq->tq_lock);
		if (tqe != NULL)
			tq->tq_nalloc++;
	}
	return (tqe);
}
示例10: ehc_write_pcf8591
/*
* Write to the PCF8591 chip.
* byteaddress = chip type base address | chip offset address.
*/
int
ehc_write_pcf8591(struct ehc_envcunit *ehcp, int byteaddress, int channel,
    int autoinc, int amode, int aenable, uint8_t *buf, int size)
{
	int idx;
	int ret;
	uint8_t control;

	ASSERT((byteaddress & 0x1) == 0);
	ASSERT(MUTEX_HELD(&ehcp->umutex));

	/* Assemble the control byte from the channel/mode bit fields. */
	control = ((aenable << 6) | (amode << 4) | (autoinc << 2) | channel);

	/* Address the device; on NACK, release the bus with a stop. */
	ret = ehc_start_pcf8584(ehcp, byteaddress);
	if (ret != EHC_SUCCESS) {
		if (ret == EHC_NO_SLAVE_ACK) {
			/*
			 * Send the "stop" condition.
			 */
			ehc_stop_pcf8584(ehcp);
		}
		return (EHC_FAILURE);
	}

	/* Send the control byte first. */
	ret = ehc_write_pcf8584(ehcp, control);
	if (ret != EHC_SUCCESS) {
		if (ret == EHC_NO_SLAVE_ACK)
			ehc_stop_pcf8584(ehcp);
		return (EHC_FAILURE);
	}

	/* Then clock out each data byte, aborting on any failure. */
	for (idx = 0; idx < size; idx++) {
		ret = ehc_write_pcf8584(ehcp, buf[idx]);
		if (ret != EHC_SUCCESS) {
			if (ret == EHC_NO_SLAVE_ACK)
				ehc_stop_pcf8584(ehcp);
			return (EHC_FAILURE);
		}
	}

	ehc_stop_pcf8584(ehcp);
	return (EHC_SUCCESS);
}
示例11: ehc_write_pcf8574a
/*
* Write to the PCF8574A chip.
* byteaddress = chip type base address | chip offset address.
*/
int
ehc_write_pcf8574a(struct ehc_envcunit *ehcp, int byteaddress, uint8_t *buf,
    int size)
{
	int idx;
	int ret;

	ASSERT((byteaddress & 0x1) == 0);
	ASSERT(MUTEX_HELD(&ehcp->umutex));

	/*
	 * Put the bus into the start condition (write); on NACK, release
	 * the bus with a stop condition before failing.
	 */
	ret = ehc_start_pcf8584(ehcp, byteaddress);
	if (ret != EHC_SUCCESS) {
		if (ret == EHC_NO_SLAVE_ACK)
			ehc_stop_pcf8584(ehcp);
		return (EHC_FAILURE);
	}

	/*
	 * Send the data - poll as needed.
	 */
	for (idx = 0; idx < size; idx++) {
		ret = ehc_write_pcf8584(ehcp, buf[idx]);
		if (ret != EHC_SUCCESS) {
			if (ret == EHC_NO_SLAVE_ACK)
				ehc_stop_pcf8584(ehcp);
			return (EHC_FAILURE);
		}
	}

	/*
	 * Transmission complete - generate stop condition and
	 * put device back into slave receiver mode.
	 */
	ehc_stop_pcf8584(ehcp);
	return (EHC_SUCCESS);
}
示例12: sctp_lookup
/*
* Similar to but more general than ip_sctp's conn_match().
*
* Matches sets of addresses as follows: if the argument addr set is
* a complete subset of the corresponding addr set in the sctp_t, it
* is a match.
*
* Caller must hold tf->tf_lock.
*
* Returns with a SCTP_REFHOLD sctp structure. Caller must do a SCTP_REFRELE.
*/
sctp_t *
sctp_lookup(sctp_t *sctp1, in6_addr_t *faddr, sctp_tf_t *tf, uint32_t *ports,
    int min_state)
{
	sctp_t *match = NULL;
	sctp_t *cur;
	sctp_faddr_t *fp;

	ASSERT(MUTEX_HELD(&tf->tf_lock));

	for (cur = tf->tf_sctp; cur != NULL; cur = cur->sctp_conn_hash_next) {
		/* Ports must match exactly and state must qualify. */
		if (*ports != cur->sctp_ports || cur->sctp_state < min_state)
			continue;

		/* check for faddr match */
		for (fp = cur->sctp_faddrs; fp != NULL; fp = fp->next) {
			if (IN6_ARE_ADDR_EQUAL(faddr, &fp->faddr))
				break;
		}
		if (fp == NULL) {
			/* no faddr match; keep looking */
			continue;
		}

		/* check for laddr subset match */
		if (sctp_compare_saddrs(sctp1, cur) <= SCTP_ADDR_SUBSET) {
			match = cur;
			break;
		}
		/* no match; continue searching */
	}

	if (match != NULL) {
		SCTP_REFHOLD(match);
	}
	return (match);
}
示例13: trim_map_free_locked
static void
trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	zio_t key;
	zio_t *wr;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	key.io_offset = start;
	key.io_size = end - start;

	/* Look for an in-flight write overlapping [start, end). */
	wr = avl_find(&tm->tm_inflight_writes, &key, NULL);
	if (wr == NULL) {
		trim_map_segment_add(tm, start, end, txg);
		return;
	}

	/* Split around the overlapping write, recursing on each side. */
	if (start < wr->io_offset)
		trim_map_free_locked(tm, start, wr->io_offset, txg);
	if (wr->io_offset + wr->io_size < end)
		trim_map_free_locked(tm, wr->io_offset + wr->io_size, end, txg);
}
示例14: port_remove_portfd
/*
* The port_remove_portfd() function dissociates the port from the fd
* and vive versa.
*/
static void
port_remove_portfd(polldat_t *pdp, port_fdcache_t *pcp)
{
	port_t *pp;
	file_t *fp;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	pp = pdp->pd_portev->portkev_port;
	fp = getf(pdp->pd_fd);
	if (fp == NULL) {
		/*
		 * If we did not get the fp for pd_fd but its portfd_t
		 * still exist in the cache, it means the pd_fd is being
		 * closed by some other thread which will also free the
		 * portfd_t.
		 */
		return;
	}

	delfd_port(pdp->pd_fd, PDTOF(pdp));
	releasef(pdp->pd_fd);
	(void) port_remove_fd_object(PDTOF(pdp), pp, pcp);
}
示例15: vdev_queue_io_add
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_tree_t *type_tree;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	/* Queue onto the per-priority class tree. */
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);

	/* Also queue onto the per-type tree, when one exists for this type. */
	type_tree = vdev_queue_type_tree(vq, zio->io_type);
	if (type_tree != NULL)
		avl_add(type_tree, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}