This article collects typical usage examples of the READ_ONCE() macro from the Linux kernel (C code). If you are unsure what READ_ONCE() does, how it is used, or what real call sites look like, the examples selected below should help.
The following presents 15 READ_ONCE() code examples, sorted by popularity by default.
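Before the examples, a note on what READ_ONCE() actually does: it forces the compiler to emit exactly one, untorn load of a shared variable, and forbids it from refetching or caching the value across the surrounding code. Below is a minimal userspace approximation, shown purely for illustration; the kernel's real definition (include/asm-generic/rwonce.h in recent trees) handles additional cases.

#include <stdio.h>

/* Userspace approximation: a volatile access forces a single, untorn load/store. */
#define READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static int shared_flag;

int main(void)
{
    WRITE_ONCE(shared_flag, 1);

    /* Without READ_ONCE(), a compiler may legally fuse, tear, or hoist
     * plain loads of shared_flag; the volatile cast prevents that. */
    if (READ_ONCE(shared_flag))
        printf("flag observed set\n");
    return 0;
}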
Example 1: mce_chrdev_poll
static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
    poll_wait(file, &mce_chrdev_wait, wait);
    /* mcelog.next is advanced locklessly by the machine-check handler. */
    if (READ_ONCE(mcelog.next))
        return POLLIN | POLLRDNORM;
    if (!mce_apei_read_done && apei_check_mce())
        return POLLIN | POLLRDNORM;
    return 0;
}
Example 2: array_of_map_lookup_elem
static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
    struct bpf_map **inner_map = array_map_lookup_elem(map, key);

    if (!inner_map)
        return NULL;

    /* The inner map pointer may be swapped concurrently by the bpf()
     * syscall, so load it exactly once. */
    return READ_ONCE(*inner_map);
}
Example 3: hrtimer_active
/*
 * A timer is active when it is enqueued into the rbtree, the callback
 * function is running, or it is in the process of being migrated to
 * another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
    struct hrtimer_cpu_base *cpu_base;
    unsigned int seq;

    do {
        cpu_base = READ_ONCE(timer->base->cpu_base);
        seq = raw_read_seqcount_begin(&cpu_base->seq);

        if (timer->state != HRTIMER_STATE_INACTIVE ||
            cpu_base->running == timer)
            return true;

    } while (read_seqcount_retry(&cpu_base->seq, seq) ||
             cpu_base != READ_ONCE(timer->base->cpu_base));

    return false;
}
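The retry loop above is the classic seqcount reader pattern: sample a sequence number, read the protected state, and retry if a writer was active or raced with us. Here is a self-contained userspace sketch of that pattern, using C11 atomics instead of the kernel's seqcount API (all names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative seqcount: writers make the count odd while updating. */
static atomic_uint seq;
static int data_a, data_b;

static unsigned read_begin(void)
{
    unsigned s;

    /* Wait until no writer is in progress (even count). */
    while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
        ;
    return s;
}

static bool read_retry(unsigned s)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static void read_pair(int *a, int *b)
{
    unsigned s;

    do {
        s = read_begin();
        *a = data_a;         /* may race with a writer... */
        *b = data_b;
    } while (read_retry(s)); /* ...in which case we simply retry */
}

A writer would bump seq to an odd value, update data_a and data_b, then bump it back to even with release ordering.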
Example 4: hv_pkt_iter_close
/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
    struct hv_ring_buffer_info *rbi = &channel->inbound;
    u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

    /*
     * Make sure all reads are done before we update the read index since
     * the writer may start writing to the read area once the read index
     * is updated.
     */
    virt_rmb();
    start_read_index = rbi->ring_buffer->read_index;
    rbi->ring_buffer->read_index = rbi->priv_read_index;

    if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
        return;

    /*
     * Issue a full memory barrier before making the signaling decision.
     * If the read of pending_send_sz were reordered before the commit of
     * the new read index (above), the host could set pending_send_sz
     * after we have sampled it and then go to sleep before we commit the
     * read index; we would then miss sending the interrupt. The full
     * barrier closes that window.
     */
    virt_mb();

    pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
    if (!pending_sz)
        return;

    /*
     * Ensure the read of write_index in hv_get_bytes_to_write()
     * happens after the read of pending_send_sz.
     */
    virt_rmb();
    curr_write_sz = hv_get_bytes_to_write(rbi);
    bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

    /*
     * If there was space before we began iteration,
     * then host was not blocked.
     */
    if (curr_write_sz - bytes_read > pending_sz)
        return;

    /* If pending write will not fit, don't give false hope. */
    if (curr_write_sz <= pending_sz)
        return;

    vmbus_setevent(channel);
}
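The two checks at the end encode the signaling policy in plain arithmetic: signal only if the bytes we just consumed are what created the space the host asked for. The helper below restates those conditions as a standalone, testable function; it is an illustration, not kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Should the guest signal the host after freeing bytes_read bytes?
 * curr_write_sz: writable space now; pending_sz: space the host requested. */
static bool should_signal(uint32_t curr_write_sz, uint32_t pending_sz,
                          uint32_t bytes_read)
{
    if (curr_write_sz - bytes_read > pending_sz)
        return false;   /* host already had enough room before we read */
    if (curr_write_sz <= pending_sz)
        return false;   /* still not enough room: don't give false hope */
    return true;
}

int main(void)
{
    assert(!should_signal(100, 10, 50)); /* had 50 > 10 before reading */
    assert(!should_signal(8, 10, 4));    /* 8 <= 10: host is still blocked */
    assert(should_signal(12, 10, 10));   /* our reads crossed the threshold */
    return 0;
}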
Example 5: srcu_drive_gp
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result. Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization. ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
    int idx;
    struct rcu_head *lh;
    struct rcu_head *rhp;
    struct srcu_struct *sp;

    sp = container_of(wp, struct srcu_struct, srcu_work);
    if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
        return; /* Already running or nothing to do. */

    /* Remove recently arrived callbacks and wait for readers. */
    WRITE_ONCE(sp->srcu_gp_running, true);
    local_irq_disable();
    lh = sp->srcu_cb_head;
    sp->srcu_cb_head = NULL;
    sp->srcu_cb_tail = &sp->srcu_cb_head;
    local_irq_enable();
    idx = sp->srcu_idx;
    WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
    WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
    swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
    WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

    /* Invoke the callbacks we removed above. */
    while (lh) {
        rhp = lh;
        lh = lh->next;
        local_bh_disable();
        rhp->func(rhp);
        local_bh_enable();
    }

    /*
     * Enable rescheduling, and if there are more callbacks,
     * reschedule ourselves. This can race with a call_srcu()
     * at interrupt level, but the ->srcu_gp_running checks will
     * straighten that out.
     */
    WRITE_ONCE(sp->srcu_gp_running, false);
    if (READ_ONCE(sp->srcu_cb_head))
        schedule_work(&sp->srcu_work);
}
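Stripped of the SRCU specifics, the middle of the function is a common pattern: atomically detach the whole pending callback list, wait out readers, then invoke the detached callbacks with no lock held so new callbacks can queue concurrently. A minimal userspace sketch of the detach-and-drain part, with a mutex standing in for local_irq_disable() (names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct cb {
    struct cb *next;
    void (*func)(struct cb *);
};

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cb *cb_head;
static struct cb **cb_tail = &cb_head;

static void queue_cb(struct cb *new)
{
    pthread_mutex_lock(&cb_lock);
    new->next = NULL;
    *cb_tail = new;             /* append at the tail pointer */
    cb_tail = &new->next;
    pthread_mutex_unlock(&cb_lock);
}

static void drive_callbacks(void)
{
    struct cb *lh, *rhp;

    /* Detach the entire list; new arrivals go onto the fresh, empty list. */
    pthread_mutex_lock(&cb_lock);
    lh = cb_head;
    cb_head = NULL;
    cb_tail = &cb_head;
    pthread_mutex_unlock(&cb_lock);

    /* Invoke callbacks outside the lock. */
    while (lh) {
        rhp = lh;
        lh = lh->next;
        rhp->func(rhp);
    }
}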
Example 6: hv_pkt_iter_avail
/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read, but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
    u32 priv_read_loc = rbi->priv_read_index;
    u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

    if (write_loc >= priv_read_loc)
        return write_loc - priv_read_loc;
    else
        return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
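The wraparound arithmetic deserves to be pinned down: in a ring of N bytes, the data between a read cursor and a write cursor is (write - read) mod N. An illustrative standalone version with a couple of sanity checks:

#include <assert.h>
#include <stdint.h>

/* Bytes available to read in a circular buffer of `size` bytes. */
static uint32_t ring_avail(uint32_t read_loc, uint32_t write_loc, uint32_t size)
{
    if (write_loc >= read_loc)
        return write_loc - read_loc;
    return (size - read_loc) + write_loc;  /* data wraps past the end */
}

int main(void)
{
    assert(ring_avail(0, 100, 4096) == 100);   /* no wrap */
    assert(ring_avail(4000, 50, 4096) == 146); /* 96 to the end + 50 */
    assert(ring_avail(10, 10, 4096) == 0);     /* empty */
    return 0;
}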
Example 7: rxrpc_fill_out_ack
/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
                                 struct rxrpc_ack_buffer *pkt,
                                 rxrpc_seq_t *_hard_ack,
                                 rxrpc_seq_t *_top,
                                 u8 reason)
{
    rxrpc_serial_t serial;
    rxrpc_seq_t hard_ack, top, seq;
    int ix;
    u32 mtu, jmax;
    u8 *ackp = pkt->acks;

    /* Barrier against rxrpc_input_data(). */
    serial = call->ackr_serial;
    hard_ack = READ_ONCE(call->rx_hard_ack);
    top = smp_load_acquire(&call->rx_top);
    *_hard_ack = hard_ack;
    *_top = top;

    pkt->ack.bufferSpace = htons(8);
    pkt->ack.maxSkew = htons(call->ackr_skew);
    pkt->ack.firstPacket = htonl(hard_ack + 1);
    pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
    pkt->ack.serial = htonl(serial);
    pkt->ack.reason = reason;
    pkt->ack.nAcks = top - hard_ack;

    if (reason == RXRPC_ACK_PING)
        pkt->whdr.flags |= RXRPC_REQUEST_ACK;

    if (after(top, hard_ack)) {
        seq = hard_ack + 1;
        do {
            ix = seq & RXRPC_RXTX_BUFF_MASK;
            if (call->rxtx_buffer[ix])
                *ackp++ = RXRPC_ACK_TYPE_ACK;
            else
                *ackp++ = RXRPC_ACK_TYPE_NACK;
            seq++;
        } while (before_eq(seq, top));
    }

    mtu = call->conn->params.peer->if_mtu;
    mtu -= call->conn->params.peer->hdrsize;
    jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
    pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
    pkt->ackinfo.maxMTU = htonl(mtu);
    pkt->ackinfo.rwind = htonl(call->rx_winsize);
    pkt->ackinfo.jumbo_max = htonl(jmax);

    *ackp++ = 0;
    *ackp++ = 0;
    *ackp++ = 0;
    return top - hard_ack + 3;
}
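The after()/before_eq() helpers used above are wrap-safe sequence-number comparisons in the RFC 1982 style: rxrpc sequence numbers are 32-bit and wrap, so ordering is decided by the sign of the unsigned difference reinterpreted as signed. An illustrative definition:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe comparisons in the style of rxrpc's after()/before_eq(). */
static bool seq_after(uint32_t a, uint32_t b)     { return (int32_t)(a - b) > 0; }
static bool seq_before_eq(uint32_t a, uint32_t b) { return (int32_t)(a - b) <= 0; }

int main(void)
{
    assert(seq_after(1, 0xffffffff)); /* 1 is after -1, despite the wrap */
    assert(seq_before_eq(0xfffffffe, 0xffffffff));
    assert(!seq_after(5, 10));
    return 0;
}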
Example 8: thread_update
void *thread_update(void *arg)
{
    WRITE_ONCE(y, 1);
#ifndef FORCE_FAILURE_2
    synchronize_srcu(&ss);
#endif
    might_sleep();
    __unbuffered_tpr_x = READ_ONCE(x);
    return NULL;
}
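This snippet is from a model-checking litmus test rather than production code; the FORCE_FAILURE_2 switch compiles synchronize_srcu() out precisely to demonstrate the bug that appears without it. The property under test: any SRCU read-side critical section that began before the grace period must have completed by the time synchronize_srcu() returns, so its stores are visible afterwards. A hypothetical paired reader, written here only to make the test legible (the actual harness is not shown):

/* Hypothetical reader for the litmus test above: if it reads y == 0,
 * its critical section began before the updater's grace period, so the
 * updater's later READ_ONCE(x) is guaranteed to observe x == 1. */
void *thread_reader(void *arg)
{
    int idx;

    idx = srcu_read_lock(&ss);
    if (READ_ONCE(y) == 0)
        WRITE_ONCE(x, 1);
    srcu_read_unlock(&ss, idx);
    return NULL;
}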
Example 9: kasan_pud_populate
static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
    unsigned long next;
    pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);

    do {
        next = pud_addr_end(addr, end);
        kasan_pmd_populate(pudp, addr, next, node, early);
    } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}
Example 10: test_xchg_lock
void *test_xchg_lock(void *arg)
{
    int me = (long)arg;

    run_on(me);
    atomic_inc(&nthreadsrunning);
    /* Wait for the parent to flip goflag from INIT to RUN. */
    while (READ_ONCE(goflag) == GOFLAG_INIT)
        poll(NULL, 0, 1);
    while (READ_ONCE(goflag) == GOFLAG_RUN) {
        xchg_lock(&testlock);
        if (owner != -1)
            lockerr++;  /* another thread held the lock: mutual exclusion failed */
        lockacqs++;
        owner = me;
        poll(NULL, 0, 1);
        owner = -1;
        xchg_unlock(&testlock);
    }
    return NULL;
}
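xchg_lock() and xchg_unlock() themselves are not part of the snippet. A minimal test-and-test-and-set spinlock built on C11 atomics, matching the behaviour the test expects (illustrative; not the benchmark's actual implementation):

#include <stdatomic.h>

/* Acquire: spin until we atomically swap 0 -> 1. */
static void xchg_lock(atomic_int *l)
{
    while (atomic_exchange_explicit(l, 1, memory_order_acquire))
        while (atomic_load_explicit(l, memory_order_relaxed))
            ;   /* spin on plain loads to limit cache-line ping-pong */
}

/* Release: store 0 with release ordering. */
static void xchg_unlock(atomic_int *l)
{
    atomic_store_explicit(l, 0, memory_order_release);
}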
Example 11: kasan_copy_shadow
/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
    pgd_t *pgdp, *pgdp_new, *pgdp_end;

    pgdp = pgd_offset_k(KASAN_SHADOW_START);
    pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
    pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
    do {
        set_pgd(pgdp_new, READ_ONCE(*pgdp));
    } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}
Example 12: process_shares_mm
/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
    struct task_struct *t;

    for_each_thread(p, t) {
        struct mm_struct *t_mm = READ_ONCE(t->mm);
        if (t_mm)
            return t_mm == mm;
    }
    return false;
}
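The READ_ONCE() here is the textbook "load once, use twice" idiom: without it the compiler may legally reload t->mm between the NULL test and the comparison, and a concurrent exit_mm() clearing the field could make the two loads disagree. A reduced illustration of the difference:

struct mm;
struct task { struct mm *mm; };

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Racy if t->mm can change concurrently: two independent loads. */
static int uses_mm_racy(struct task *t, struct mm *mm)
{
    return t->mm && t->mm == mm;
}

/* Safe: one load into a local; both uses see the same snapshot. */
static int uses_mm(struct task *t, struct mm *mm)
{
    struct mm *t_mm = READ_ONCE(t->mm);

    return t_mm && t_mm == mm;
}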
Example 13: hv_signal_on_write
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
                               bool kick_q)
{
    struct hv_ring_buffer_info *rbi = &channel->outbound;

    virt_mb();
    if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
        return;

    /* check interrupt_mask before read_index */
    virt_rmb();

    /*
     * This is the only case we need to signal: when the
     * ring transitions from being empty to non-empty.
     */
    if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
        vmbus_setevent(channel);
}
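The final comparison implements the usual "signal only on the empty-to-non-empty transition" optimization: if old_write (where our write began) equals the consumer's read_index, the ring was empty and the consumer may be idle; otherwise data was already pending and the consumer is known to be active. Restated as a standalone predicate, for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Signal only if our write began at the consumer's read cursor,
 * i.e. the ring was empty and the consumer may need waking. */
static bool need_signal(uint32_t old_write, uint32_t read_index)
{
    return old_write == read_index;
}

int main(void)
{
    assert(need_signal(42, 42));  /* ring was empty: wake the consumer */
    assert(!need_signal(42, 40)); /* data was pending: consumer is live */
    return 0;
}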
Example 14: prepare_exit_to_usermode
/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
    struct thread_info *ti = current_thread_info();
    u32 cached_flags;

    addr_limit_user_check();
    lockdep_assert_irqs_disabled();
    lockdep_sys_exit();

    cached_flags = READ_ONCE(ti->flags);
    if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
        exit_to_usermode_loop(regs, cached_flags);

    /* Reload ti->flags; we may have rescheduled above. */
    cached_flags = READ_ONCE(ti->flags);

    fpregs_assert_state_consistent();
    if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
        switch_fpu_return();

#ifdef CONFIG_COMPAT
    /*
     * Compat syscalls set TS_COMPAT. Make sure we clear it before
     * returning to user mode. We need to clear it *after* signal
     * handling, because syscall restart has a fixup for compat
     * syscalls. The fixup is exercised by the ptrace_syscall_32
     * selftest.
     *
     * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
     * special case only applies after poking regs and before the
     * very next return to user mode.
     */
    ti->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif

    user_enter_irqoff();
    mds_user_clear_cpu_buffers();
}
Example 15: quarantine_reduce
void quarantine_reduce(void)
{
    size_t new_quarantine_size, percpu_quarantines;
    unsigned long flags;
    struct qlist_head to_free = QLIST_INIT;
    size_t size_to_free = 0;
    struct qlist_node *last;

    if (likely(READ_ONCE(global_quarantine.bytes) <=
               READ_ONCE(quarantine_size)))
        return;

    spin_lock_irqsave(&quarantine_lock, flags);

    /*
     * Update quarantine size in case of hotplug. Allocate a fraction of
     * the installed memory to quarantine minus per-cpu queue limits.
     */
    new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
        QUARANTINE_FRACTION;
    percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
    new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
        0 : new_quarantine_size - percpu_quarantines;
    WRITE_ONCE(quarantine_size, new_quarantine_size);

    last = global_quarantine.head;
    while (last) {
        struct kmem_cache *cache = qlink_to_cache(last);

        size_to_free += cache->size;
        if (!last->next || size_to_free >
            global_quarantine.bytes - QUARANTINE_LOW_SIZE)
            break;
        last = last->next;
    }
    qlist_move(&global_quarantine, last, &to_free, size_to_free);

    spin_unlock_irqrestore(&quarantine_lock, flags);

    qlist_free_all(&to_free, NULL);
}
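The budget computation under the lock is saturating arithmetic: target a fixed fraction of installed RAM, subtract what the per-cpu queues may already hold, and clamp at zero. Factored out below with illustrative constants:

#include <assert.h>
#include <stddef.h>

/* Global quarantine budget = totalram/fraction - per-cpu reserves,
 * clamped at zero (saturating subtraction avoids size_t underflow). */
static size_t quarantine_budget(size_t totalram_bytes, size_t fraction,
                                size_t percpu_size, size_t ncpus)
{
    size_t target = totalram_bytes / fraction;
    size_t reserved = percpu_size * ncpus;

    return target < reserved ? 0 : target - reserved;
}

int main(void)
{
    /* 1 GiB / 32 = 32 MiB target, minus 8 x 1 MiB reserves = 24 MiB. */
    assert(quarantine_budget(1UL << 30, 32, 1UL << 20, 8) == 24UL << 20);
    /* Tiny system: reserves exceed the target, so the budget clamps to 0. */
    assert(quarantine_budget(1UL << 20, 32, 1UL << 20, 8) == 0);
    return 0;
}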