本文整理汇总了C++中simple_unlock函数的典型用法代码示例。如果您正苦于以下问题:C++ simple_unlock函数的具体用法?C++ simple_unlock怎么用?C++ simple_unlock使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了simple_unlock函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: char_open_done
/*
 * char_open_done:
 *
 * Completion retry for a pending tty open.  Called with no locks
 * held; may run on any CPU.  Returns TRUE once the terminal has
 * reached the open state and the open reply has been sent, FALSE
 * if the request was re-queued to wait longer.
 */
boolean_t char_open_done(
	io_req_t	ior)
{
	struct tty	*tty = (struct tty *)ior->io_dev_ptr;
	spl_t		spl = spltty();

	simple_lock(&tty->t_lock);
	if (!(tty->t_state & TS_ISOPEN)) {
		/* Not open yet: park the request for another retry. */
		queue_delayed_reply(&tty->t_delayed_open, ior, char_open_done);
		simple_unlock(&tty->t_lock);
		splx(spl);
		return FALSE;
	}

	/*
	 * Mark the tty fully open, clear the waiting-for-open flag,
	 * and run the modem-control hook (TM_RTS/DMBIS) if present.
	 */
	tty->t_state |= TS_ISOPEN;
	tty->t_state &= ~TS_WOPEN;
	if (tty->t_mctl)
		(*tty->t_mctl)(tty, TM_RTS, DMBIS);
	simple_unlock(&tty->t_lock);
	splx(spl);

	ior->io_error = D_SUCCESS;
	(void) ds_open_done(ior);
	return TRUE;
}
示例2: char_write_done
/*
 * Retry wait for output queue emptied, for write.
 * No locks may be held.
 * May run on any CPU.
 *
 * Returns TRUE once the write has been completed and the reply
 * delivered; FALSE if the request was re-queued to wait longer.
 */
boolean_t char_write_done(
register io_req_t ior)
{
register struct tty *tp = (struct tty *)ior->io_dev_ptr;
register spl_t s = spltty();
simple_lock(&tp->t_lock);
/*
 * Output queue still above the high-water mark, or carrier lost:
 * park the request on the delayed-write queue and retry later via
 * this same function.
 */
if (tp->t_outq.c_cc > TTHIWAT(tp) ||
(tp->t_state & TS_CARR_ON) == 0) {
queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
simple_unlock(&tp->t_lock);
splx(s);
return FALSE;
}
simple_unlock(&tp->t_lock);
splx(s);
/*
 * Send the write reply -- inband or regular variant depending on
 * IO_INBAND -- reporting the byte count actually written
 * (total minus residual).  Skipped if the reply port is no longer
 * valid.
 */
if (IP_VALID(ior->io_reply_port)) {
(void) (*((ior->io_op & IO_INBAND) ?
ds_device_write_reply_inband :
ds_device_write_reply))(ior->io_reply_port,
ior->io_reply_port_type,
ior->io_error,
(int) (ior->io_total -
ior->io_residual));
}
/* Drop the device reference taken when the request was queued. */
mach_device_deallocate(ior->io_device);
return TRUE;
}
示例3: cpu_exit_wait
/*
 * cpu_exit_wait:
 *
 * Spin until the given CPU reports that it has halted (LCPU_HALT),
 * turned off (LCPU_OFF), or set its stopped flag.  The topo lock is
 * dropped and interrupts restored on every polling iteration so the
 * target CPU can make progress.  Panics if the CPU fails to stop
 * within the TSC-based timeout.
 */
void
cpu_exit_wait(
int cpu)
{
cpu_data_t *cdp = cpu_datap(cpu);
boolean_t intrs_enabled;
uint64_t tsc_timeout;
/*
 * Wait until the CPU indicates that it has stopped.
 * Disable interrupts while the topo lock is held -- arguably
 * this should always be done but in this instance it can lead to
 * a timeout if long-running interrupt were to occur here.
 */
intrs_enabled = ml_set_interrupts_enabled(FALSE);
simple_lock(&x86_topo_lock);
/* Set a generous timeout of several seconds (in TSC ticks) */
tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
while ((cdp->lcpu.state != LCPU_HALT)
&& (cdp->lcpu.state != LCPU_OFF)
&& !cdp->lcpu.stopped) {
/* Release lock and re-enable interrupts between polls. */
simple_unlock(&x86_topo_lock);
ml_set_interrupts_enabled(intrs_enabled);
cpu_pause();
if (rdtsc64() > tsc_timeout)
panic("cpu_exit_wait(%d) timeout", cpu);
ml_set_interrupts_enabled(FALSE);
simple_lock(&x86_topo_lock);
}
simple_unlock(&x86_topo_lock);
/* Restore the caller's original interrupt state. */
ml_set_interrupts_enabled(intrs_enabled);
}
示例4: dmio_usrreq_done
/*
 * dmio_usrreq_done:
 *
 * Dmover completion callback.  Removes the finished request from the
 * pending queue.  If the session is being torn down (DMIO_STATE_DEAD)
 * the request is freed immediately, and the last such request also
 * destroys the dmio state itself -- note that the early-return path
 * frees `ds`, so `ds` must not be touched after it.  Otherwise the
 * request moves to the complete queue and any sleeping reader or
 * selector is notified.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
struct dmio_usrreq_state *dus = dreq->dreq_cookie;
struct dmio_state *ds = dreq->dreq_session->dses_cookie;
/* We're already at splsoftclock(). */
simple_lock(&ds->ds_slock);
TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
if (ds->ds_flags & DMIO_STATE_DEAD) {
/* Session dying: discard the request instead of completing it. */
ds->ds_nreqs--;
dmio_usrreq_fini(ds, dus);
dmover_request_free(dreq);
if (ds->ds_nreqs == 0) {
/* Last outstanding request: tear down the state entirely. */
simple_unlock(&ds->ds_slock);
seldestroy(&ds->ds_selq);
pool_put(&dmio_state_pool, ds);
return;
}
} else {
/* Normal completion: hand the request to readers. */
TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
wakeup(&ds->ds_complete);
}
if (ds->ds_flags & DMIO_STATE_SEL) {
ds->ds_flags &= ~DMIO_STATE_SEL;
selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
}
}
simple_unlock(&ds->ds_slock);
}
示例5: char_read_done
/*
 * Retry wait for characters, for read.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 *
 * Returns TRUE once input was copied out and the read completed;
 * FALSE if the request was re-queued to wait for more characters.
 */
boolean_t char_read_done(
register io_req_t ior)
{
register struct tty *tp = (struct tty *)ior->io_dev_ptr;
register spl_t s = spltty();
simple_lock(&tp->t_lock);
/* No input available, or carrier lost: queue for another retry. */
if (tp->t_inq.c_cc <= 0 ||
(tp->t_state & TS_CARR_ON) == 0) {
queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
simple_unlock(&tp->t_lock);
splx(s);
return FALSE;
}
/*
 * Move up to io_count characters from the input queue into the
 * request buffer; residual is whatever could not be satisfied.
 */
ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
ior->io_data,
(int)ior->io_count);
/*
 * Input flow control: if RTS was dropped to throttle the sender,
 * re-assert it now that queue space has been freed.
 */
if (tp->t_state & TS_RTS_DOWN) {
(*tp->t_mctl)(tp, TM_RTS, DMBIS);
tp->t_state &= ~TS_RTS_DOWN;
}
simple_unlock(&tp->t_lock);
splx(s);
(void) ds_read_done(ior);
return TRUE;
}
示例6: lock_try_read_to_write
/*
 * Routine:	lock_try_read_to_write
 * Function:
 *	Attempt to upgrade a held read lock to write permission.
 *	If some other reader has already requested the upgrade,
 *	the read lock is retained and FALSE is returned; otherwise
 *	the caller blocks until the remaining readers drain, and
 *	TRUE is returned with the write lock held.
 *
 *	Returns FALSE if the upgrade *failed*.
 */
boolean_t lock_try_read_to_write(
	register lock_t	l)
{
	check_simple_locks();
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/* Recursive hold: trade the read ref for a depth count. */
		l->read_count--;
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return TRUE;
	}

	if (l->want_upgrade) {
		/* Another reader already claimed the upgrade slot. */
		simple_unlock(&l->interlock);
		return FALSE;
	}

	/* Claim the upgrade, release our read reference, then wait
	 * for every other reader to finish. */
	l->want_upgrade = TRUE;
	l->read_count--;
	for (;;) {
		if (l->read_count == 0)
			break;
		l->waiting = TRUE;
		thread_sleep(l,
			simple_lock_addr(l->interlock), FALSE);
		simple_lock(&l->interlock);
	}

	simple_unlock(&l->interlock);
	return TRUE;
}
示例7: lock_try_write
/*
 * lock_try_write:
 *
 * Non-blocking attempt to take the lock for writing.  Succeeds
 * immediately for a recursive acquisition by the owning thread, or
 * when no writer, upgrader, or reader currently holds the lock.
 * Returns TRUE on success, FALSE otherwise; never sleeps.
 */
boolean_t lock_try_write(
	register lock_t	l)
{
	boolean_t	got_it;

	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/* Already ours: just bump the recursion depth. */
		l->recursion_depth++;
		got_it = TRUE;
	} else if (l->want_write || l->want_upgrade || l->read_count) {
		/* A writer, upgrader, or readers are in the way. */
		got_it = FALSE;
	} else {
		/* Lock is free: take it for writing. */
		l->want_write = TRUE;
		got_it = TRUE;
	}

	simple_unlock(&l->interlock);
	return got_it;
}
示例8: action_thread
/*
 * action_thread:
 *
 * Kernel thread that drains the processor action queue, invoking
 * processor_doaction() on each queued processor with the action
 * lock dropped.  Blocks on the queue when it is empty; note the
 * assert_wait / unlock / block ordering, which must be preserved
 * to avoid losing wakeups.
 */
void
action_thread(void)
{
register processor_t processor;
spl_t s;
/* This thread's stack must stay resident. */
thread_swappable(current_act(), FALSE);
while (TRUE) {
s = splsched();
simple_lock(&action_lock);
while ( !queue_empty(&action_queue)) {
processor = (processor_t) queue_first(&action_queue);
queue_remove(&action_queue, processor, processor_t,
processor_queue);
/* Drop the lock and spl while performing the action. */
simple_unlock(&action_lock);
splx(s);
processor_doaction(processor);
s = splsched();
simple_lock(&action_lock);
}
/* Queue empty: arm the wait before releasing the lock. */
assert_wait((event_t) &action_queue, FALSE);
simple_unlock(&action_lock);
splx(s);
counter(c_action_thread_block++);
thread_block((void (*)(void)) 0);
}
}
示例9: lock_try_read
/*
 * lock_try_read:
 *
 * Non-blocking attempt to take a read hold on the lock.  Fails
 * (FALSE) when a writer holds or is waiting to upgrade; otherwise
 * increments the reader count and returns TRUE.  ETAP entries
 * record the acquisition for lock tracing.
 */
boolean_t
lock_try_read(
	register lock_t	*l)
{
	start_data_node_t	entry = {0};
	unsigned short		trace = 0;
	pc_t			pc;

	/* Prepare an ETAP trace entry for this attempt. */
	ETAP_STAMP(lock_event_table(l), trace, trace);
	ETAP_CREATE_ENTRY(entry, trace);

	simple_lock(&l->interlock);
	if (l->want_write || l->want_upgrade) {
		/* Writer active or upgrade pending: fail fast. */
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);
		return FALSE;
	}

	/* No writer activity: join the readers and link the trace. */
	l->read_count++;
	ETAP_LINK_ENTRY(l, entry, trace);
	simple_unlock(&l->interlock);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);
	ETAP_DURATION_TIMESTAMP(entry, trace);
	return TRUE;
}
示例10: thread_stack_daemon
/*
 * thread_stack_daemon:
 *
 * Perform stack allocation as required due to
 * invoke failures.
 *
 * Dequeues threads waiting for a kernel stack, allocates one for
 * each (with the queue lock dropped, since stack_alloc may block),
 * and makes the thread runnable again.  When the queue drains, the
 * daemon arms a wait and blocks with itself as the continuation.
 */
static void
thread_stack_daemon(void)
{
thread_t thread;
simple_lock(&thread_stack_lock);
while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
/* Allocation may block -- do it without the queue lock. */
simple_unlock(&thread_stack_lock);
stack_alloc(thread);
/* Requeue the thread for execution at splsched. */
(void)splsched();
thread_lock(thread);
thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
thread_unlock(thread);
(void)spllo();
simple_lock(&thread_stack_lock);
}
/* Arm the wait before dropping the lock to avoid lost wakeups. */
assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
simple_unlock(&thread_stack_lock);
thread_block((thread_continue_t)thread_stack_daemon);
/*NOTREACHED*/
}
示例11: get_sched_statistics
/*
 * get_sched_statistics:
 *
 * Copy per-processor scheduler statistics into the caller-supplied
 * buffer `out`, followed by two pseudo-entries: one for the realtime
 * run queue (ps_cpuid == -1) and one for the fair-share queue
 * (ps_cpuid == -2).  On entry *count is the buffer size in bytes;
 * on success it is set to the number of bytes written.  Fails with
 * KERN_FAILURE if stats are disabled or the buffer is too small.
 */
kern_return_t
get_sched_statistics(
struct _processor_statistics_np *out,
uint32_t *count)
{
processor_t processor;
if (!sched_stats_active) {
return KERN_FAILURE;
}
simple_lock(&processor_list_lock);
if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
simple_unlock(&processor_list_lock);
return KERN_FAILURE;
}
/* Walk the processor list, emitting one record per processor. */
processor = processor_list;
while (processor) {
struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;
out->ps_cpuid = processor->cpu_id;
out->ps_csw_count = stats->csw_count;
out->ps_preempt_count = stats->preempt_count;
out->ps_preempted_rt_count = stats->preempted_rt_count;
out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
out->ps_rt_sched_count = stats->rt_sched_count;
out->ps_interrupt_count = stats->interrupt_count;
out->ps_ipi_count = stats->ipi_count;
out->ps_timer_pop_count = stats->timer_pop_count;
out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
out->ps_idle_transitions = stats->idle_transitions;
out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;
out++;
processor = processor->processor_list;
}
*count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));
simple_unlock(&processor_list_lock);
/* And include RT Queue information */
/* (written after dropping the list lock; rt_runq is read unlocked
 * here -- presumably a tolerable racy snapshot, as in the rest of
 * this path) */
bzero(out, sizeof(*out));
out->ps_cpuid = (-1);
out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
out++;
*count += (uint32_t)sizeof(struct _processor_statistics_np);
/* And include Fair Share Queue information at the end */
bzero(out, sizeof(*out));
out->ps_cpuid = (-2);
out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
*count += (uint32_t)sizeof(struct _processor_statistics_np);
return KERN_SUCCESS;
}
示例12: lock_write
/*
 * lock_write:
 *
 * Acquire the lock for writing, blocking as needed.  Recursive
 * acquisition by the owning thread just bumps the depth.  Otherwise
 * acquisition happens in two phases: first win the want_write bit
 * against other writers, then wait for readers and any pending
 * upgrade to drain.  Each phase spins for lock_wait_time iterations
 * before sleeping (if the lock permits sleeping).
 */
void lock_write(
register lock_t l)
{
register int i;
check_simple_locks();
simple_lock(&l->interlock);
if (l->thread == current_thread()) {
/*
 * Recursive lock.
 */
l->recursion_depth++;
simple_unlock(&l->interlock);
return;
}
/*
 * Try to acquire the want_write bit.
 */
while (l->want_write) {
/* Spin briefly without the interlock before sleeping. */
if ((i = lock_wait_time) > 0) {
simple_unlock(&l->interlock);
while (--i > 0 && l->want_write)
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && l->want_write) {
l->waiting = TRUE;
thread_sleep(l,
simple_lock_addr(l->interlock), FALSE);
simple_lock(&l->interlock);
}
}
l->want_write = TRUE;
/* Wait for readers (and upgrades) to finish */
while ((l->read_count != 0) || l->want_upgrade) {
if ((i = lock_wait_time) > 0) {
simple_unlock(&l->interlock);
while (--i > 0 && (l->read_count != 0 ||
l->want_upgrade))
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
l->waiting = TRUE;
thread_sleep(l,
simple_lock_addr(l->interlock), FALSE);
simple_lock(&l->interlock);
}
}
simple_unlock(&l->interlock);
}
示例13: lock_read_to_write
/*
 * Routine: lock_read_to_write
 * Function:
 * Improves a read-only lock to one with
 * write permission. If another reader has
 * already requested an upgrade to a write lock,
 * no lock is held upon return.
 *
 * Returns TRUE if the upgrade *failed*.
 *
 * NOTE: the return convention is inverted relative to
 * lock_try_read_to_write() -- here FALSE means success.  The
 * caller's read reference is always given up (read_count is
 * decremented up front, on every path).
 */
boolean_t lock_read_to_write(
register lock_t l)
{
register int i;
check_simple_locks();
simple_lock(&l->interlock);
/* Give up our read reference unconditionally. */
l->read_count--;
if (l->thread == current_thread()) {
/*
 * Recursive lock: the read becomes another recursion level,
 * and the upgrade trivially succeeds (FALSE).
 */
l->recursion_depth++;
simple_unlock(&l->interlock);
return(FALSE);
}
if (l->want_upgrade) {
/*
 * Someone else has requested upgrade.
 * Since we've released a read lock, wake
 * him up.
 */
if (l->waiting && (l->read_count == 0)) {
l->waiting = FALSE;
thread_wakeup(l);
}
simple_unlock(&l->interlock);
return TRUE;
}
/* Claim the upgrade and wait for remaining readers to drain,
 * spinning lock_wait_time iterations before each sleep. */
l->want_upgrade = TRUE;
while (l->read_count != 0) {
if ((i = lock_wait_time) > 0) {
simple_unlock(&l->interlock);
while (--i > 0 && l->read_count != 0)
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && l->read_count != 0) {
l->waiting = TRUE;
thread_sleep(l,
simple_lock_addr(l->interlock), FALSE);
simple_lock(&l->interlock);
}
}
simple_unlock(&l->interlock);
return FALSE;
}
示例14: dmio_ioctl
/*
 * dmio_ioctl:
 *
 * Ioctl file op.
 *
 * FIONBIO/FIOASYNC are accepted as no-ops.  DMIO_SETFUNC binds the
 * descriptor to a dmover session: the DMIO_STATE_LARVAL flag is set
 * (under the state lock) to exclude concurrent setup while the
 * session is created outside the lock, then cleared once the result
 * is recorded.  EBUSY if a session already exists or setup is in
 * progress; ENOTTY for unknown commands.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
struct dmio_state *ds = (struct dmio_state *) fp->f_data;
int error, s;
switch (cmd) {
case FIONBIO:
case FIOASYNC:
return (0);
case DMIO_SETFUNC:
{
struct dmio_setfunc *dsf = data;
struct dmover_session *dses;
s = splsoftclock();
simple_lock(&ds->ds_slock);
/* Reject if already bound, or another SETFUNC is underway. */
if (ds->ds_session != NULL ||
(ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
simple_unlock(&ds->ds_slock);
splx(s);
return (EBUSY);
}
/* Mark setup in progress, then create the session unlocked. */
ds->ds_flags |= DMIO_STATE_LARVAL;
simple_unlock(&ds->ds_slock);
splx(s);
/* Guarantee NUL termination of the user-supplied name. */
dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
error = dmover_session_create(dsf->dsf_name, &dses);
s = splsoftclock();
simple_lock(&ds->ds_slock);
if (error == 0) {
dses->dses_cookie = ds;
ds->ds_session = dses;
}
ds->ds_flags &= ~DMIO_STATE_LARVAL;
simple_unlock(&ds->ds_slock);
splx(s);
break;
}
default:
error = ENOTTY;
}
return (error);
}
示例15: rf_RaidIOThread
/*
 * rf_RaidIOThread:
 *
 * Per-array RAIDframe I/O completion thread.  Loops until shutdown:
 * sleeps on the iodone queue when idle, runs deferred parity-map
 * work, invokes the completion callback for each finished disk-queue
 * request, and kicks raidstart() to issue pending outgoing I/O.
 * The iodone lock is released around each callout and reacquired
 * afterwards.  On shutdown, acknowledges rf_ShutdownEngine via
 * wakeup and exits.
 */
static void
rf_RaidIOThread(RF_ThreadArg_t arg)
{
RF_Raid_t *raidPtr;
RF_DiskQueueData_t *req;
int s;
raidPtr = (RF_Raid_t *) arg;
s = splbio();
simple_lock(&(raidPtr->iodone_lock));
while (!raidPtr->shutdown_raidio) {
/* if there is nothing to do, then snooze. */
/* (ltsleep atomically drops and retakes iodone_lock) */
if (TAILQ_EMPTY(&(raidPtr->iodone)) &&
rf_buf_queue_check(raidPtr->raidid)) {
ltsleep(&(raidPtr->iodone), PRIBIO, "raidiow", 0,
&(raidPtr->iodone_lock));
}
/* Check for deferred parity-map-related work. */
if (raidPtr->parity_map != NULL) {
simple_unlock(&(raidPtr->iodone_lock));
rf_paritymap_checkwork(raidPtr->parity_map);
simple_lock(&(raidPtr->iodone_lock));
}
/* See what I/Os, if any, have arrived */
while ((req = TAILQ_FIRST(&(raidPtr->iodone))) != NULL) {
TAILQ_REMOVE(&(raidPtr->iodone), req, iodone_entries);
/* Run the completion callbacks without the lock held. */
simple_unlock(&(raidPtr->iodone_lock));
rf_DiskIOComplete(req->queue, req, req->error);
(req->CompleteFunc) (req->argument, req->error);
simple_lock(&(raidPtr->iodone_lock));
}
/* process any pending outgoing IO */
simple_unlock(&(raidPtr->iodone_lock));
raidstart(raidPtr);
simple_lock(&(raidPtr->iodone_lock));
}
/* Let rf_ShutdownEngine know that we're done... */
raidPtr->shutdown_raidio = 0;
wakeup(&(raidPtr->shutdown_raidio));
simple_unlock(&(raidPtr->iodone_lock));
splx(s);
kthread_exit(0);
}