This article collects typical usage examples of the smp_rmb function in C++. If you have been wondering how exactly smp_rmb is used, how it works, or what real code that calls it looks like, the hand-picked examples below should help.
A total of 15 smp_rmb code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
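Before working through the examples, keep the pairing rule in mind: smp_rmb() only orders reads on the executing CPU, and it is meaningful only against an smp_wmb() (or a stronger barrier) separating the matching writes on the other side. The fragment below is a minimal, hypothetical sketch of that pairing; the names data, ready, publish_data() and consume_data() are invented for illustration and do not come from any of the examples.

#include <asm/barrier.h>        /* smp_rmb(), smp_wmb() */

static int data;
static int ready;

/* Writer: store the payload, then the flag announcing it. */
static void publish_data(int value)
{
    data = value;
    smp_wmb();          /* order the 'data' store before the 'ready' store */
    ready = 1;
}

/* Reader: check the flag, then read the payload, in that order. */
static int consume_data(void)
{
    if (!ready)
        return -1;      /* nothing published yet */
    smp_rmb();          /* pairs with smp_wmb() in publish_data() */
    return data;
}

Every example below is an instance of this reader side, with the matching writer living elsewhere in the same subsystem.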
Example 1: skb_free_datagram_locked
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
    bool slow;

    if (likely(atomic_read(&skb->users) == 1))
        smp_rmb();
    else if (likely(!atomic_dec_and_test(&skb->users)))
        return;

    slow = lock_sock_fast(sk);
    skb_orphan(skb);
    sk_mem_reclaim_partial(sk);
    unlock_sock_fast(sk, slow);

    /* skb is now orphaned, can be freed outside of locked section */
    trace_kfree_skb(skb, skb_free_datagram_locked);
    __kfree_skb(skb);
}
Example 2: vlan_dev_get_egress_qos_mask
static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
    struct vlan_priority_tci_mapping *mp;

    smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

    mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
    while (mp) {
        if (mp->priority == skb->priority) {
            return mp->vlan_qos; /* This should already be shifted
                                  * to mask correctly with the
                                  * VLAN's TCI */
        }
        mp = mp->next;
    }
    return 0;
}
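The smp_rmb() above is only half of the protocol: the writer must publish a fully initialised mapping before linking it into egress_priority_map[]. The following is a hedged sketch of the shape vlan_dev_set_egress_priority() takes; the field names are reused from the reader above, but the exact upstream code differs (error handling, duplicate detection, return value).

/* Writer-side sketch: initialise the mapping fully, then publish it. */
static void set_egress_priority_sketch(struct net_device *dev,
                                       u32 skb_prio, u16 vlan_qos)
{
    struct vlan_priority_tci_mapping *np;

    np = kmalloc(sizeof(*np), GFP_KERNEL);
    if (!np)
        return;

    np->priority = skb_prio;
    np->vlan_qos = vlan_qos;
    np->next = vlan_dev_priv(dev)->egress_priority_map[skb_prio & 0xF];

    /* All fields must be visible before the node becomes reachable;
     * pairs with the smp_rmb() in vlan_dev_get_egress_qos_mask(). */
    smp_wmb();
    vlan_dev_priv(dev)->egress_priority_map[skb_prio & 0xF] = np;
}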
Example 3: native_cpu_die
void native_cpu_die(unsigned int cpu)
{
    unsigned int i;

    for (i = 0; i < 10; i++) {
        smp_rmb();
        if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
            if (system_state == SYSTEM_RUNNING)
                pr_info("CPU %u is now offline\n", cpu);
            return;
        }
        msleep(100);
    }
    pr_err("CPU %u didn't die...\n", cpu);
}
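The loop polls the per-CPU cpu_state variable, which the dying CPU sets to CPU_DEAD once its teardown is complete. Below is a hedged sketch of that dying-CPU side of the handshake; it is heavily simplified, and the real play_dead() path does considerably more (mwait/hlt selection, offlining notifications).

/* Dying-CPU side, sketched: finish teardown, then advertise CPU_DEAD. */
static void play_dead_sketch(void)
{
    idle_task_exit();               /* drop the idle task's mm reference */

    mb();                           /* order teardown before the CPU_DEAD store */
    __this_cpu_write(cpu_state, CPU_DEAD);

    for (;;)
        native_halt();              /* wait to be taken offline / powered down */
}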
Example 4: gc_jd_queue_enqueue
void
gc_jd_queue_enqueue(gc_jd_queue_t *q, gc_job_desc_t *item)
{
    item->sys.next = 0;

    _mutex_lock(ptr_to_ea(&q->mutex));
    smp_rmb();          // import barrier

    if (q->tail == 0) {         // currently empty
        q->tail = q->head = jdp_to_ea(item);
    }
    else {                      // not empty, append
        ea_to_jdp(q->tail)->sys.next = jdp_to_ea(item);
        q->tail = jdp_to_ea(item);
    }

    smp_wmb();          // orders stores above before clearing of mutex
    _mutex_unlock(ptr_to_ea(&q->mutex));
}
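The matching consumer takes the same mutex and follows the same import/export barrier discipline. The following is a hedged sketch of a dequeue counterpart; the gc_eaddr_t type and the _sketch function name are assumptions based on the fragment above, not the gcell sources.

/* Consumer-side sketch: import under the mutex, detach the head, export. */
gc_job_desc_t *
gc_jd_queue_dequeue_sketch(gc_jd_queue_t *q)
{
    gc_eaddr_t head_ea;

    _mutex_lock(ptr_to_ea(&q->mutex));
    smp_rmb();          // import barrier: observe the enqueuer's stores

    head_ea = q->head;
    if (head_ea != 0) {
        q->head = ea_to_jdp(head_ea)->sys.next;
        if (q->head == 0)           // queue became empty
            q->tail = 0;
    }

    smp_wmb();          // export barrier before releasing the mutex
    _mutex_unlock(ptr_to_ea(&q->mutex));
    return head_ea ? ea_to_jdp(head_ea) : 0;
}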
Example 5: boot_secondary
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * The secondary processor is waiting to be released from
     * the holding pen - release it, then wait for it to flag
     * that it has been released by resetting pen_release.
     *
     * Note that "pen_release" is the hardware CPU ID, whereas
     * "cpu" is Linux's internal ID.
     */
    write_pen_release(cpu);

    /*
     * Send the secondary CPU a soft interrupt, thereby causing
     * the boot monitor to read the system wide flags register,
     * and branch to the address found there.
     */
    smp_cross_call(cpumask_of(cpu), 1);

    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }

    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}
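Examples 5, 12 and 15 all spin on pen_release after an smp_rmb(); the store side is write_pen_release(), which publishes the value behind a write barrier and then pushes it out of the cache so a secondary CPU that may still be running with caches disabled can observe it. A hedged sketch of that helper follows; the cache-maintenance call shown is representative, and platforms differ in the exact flush they use.

/* Boot-CPU side, sketched: publish pen_release for the spinning secondary. */
static void write_pen_release(int val)
{
    pen_release = val;
    smp_wmb();                      /* pairs with smp_rmb() in the poll loop */
    sync_cache_w(&pen_release);     /* make the store visible to a CPU that
                                     * may not be coherent yet */
}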
Example 6: dsm_find_client
static struct dsm_client *dsm_find_client(char *cname)
{
    int i;
    struct dsm_client *client = NULL;

    mutex_lock(&g_dsm_server.mtx_lock);
    smp_rmb();
    for (i = 0; i < CLIENT_SIZE; i++) {
        if ((test_bit(DSM_CLIENT_VAILD_BIT, &g_dsm_server.client_flag[i]))
            && (!strncasecmp(g_dsm_server.client_list[i]->client_name, cname, CLIENT_NAME_LEN))) {
            client = g_dsm_server.client_list[i];
            break;
        }
    }
    mutex_unlock(&g_dsm_server.mtx_lock);

    DSM_LOG_DEBUG("cname: %s find %s\n", cname, client ? "success" : "failed");
    return client;
}
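The smp_rmb() pairs with a write barrier on the path that registers a client into g_dsm_server: the slot must be filled before the valid bit that makes it visible to lookups. The following is a hedged sketch of that publication step; the function name is invented, and the real registration code in this vendor driver does more bookkeeping.

/* Registration-side sketch: fill the slot, then publish the valid bit. */
static void dsm_publish_client_sketch(int i, struct dsm_client *client)
{
    g_dsm_server.client_list[i] = client;

    /* The client_list[] store must be visible before the flag that marks
     * the slot valid; pairs with the smp_rmb() in dsm_find_client(). */
    smp_wmb();
    set_bit(DSM_CLIENT_VAILD_BIT, &g_dsm_server.client_flag[i]);
}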
Example 7: print_buffers
static void print_buffers(void)
{
    struct print_buffer *buffer;
    struct entry_head *head;
    off_t read_pos;
    int len, ret;

    while (1) {
        buffer = get_next_buffer();
        if (!buffer)
            break;

        read_pos = buffer->read_pos;
        head = buffer->ring + read_pos;
        len = head->len;

        if (len) {
            /* Print out non-empty entry and proceed */
            /* Check if output goes to syslog */
            if (head->dest == RT_PRINT_SYSLOG_STREAM) {
                syslog(head->priority, "%s", head->data);
            } else {
                ret = fwrite(head->data, head->len, 1, head->dest);
                (void)ret;
            }
            read_pos += sizeof(*head) + len;
        } else {
            /* Empty entries mark the wrap-around */
            read_pos = 0;
        }

        /* Make sure we have read the entry completely before
           forwarding read_pos */
        smp_rmb();
        buffer->read_pos = read_pos;

        /* Enforce the read_pos update before proceeding */
        smp_wmb();
    }
}
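On the producing side, an entry must be completely written before this flusher can be allowed to trust it, so the writer publishes the record behind an smp_wmb(). Below is a hedged sketch of that step; the function name and argument list are invented, and only the entry_head fields come from the code above.

/* Producer-side sketch: fill the entry completely, then expose its length. */
static void publish_entry_sketch(struct print_buffer *buffer, off_t write_pos,
                                 int len)
{
    struct entry_head *head = buffer->ring + write_pos;

    /* ... head->dest, head->priority and head->data are filled in here ... */

    /* The contents must be globally visible before the non-zero length that
     * lets the flusher print the entry. */
    smp_wmb();
    head->len = len;
}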
Example 8: comedi_buf_read_alloc
/* allocates a chunk for the reader from filled (and munged) buffer space */
unsigned int comedi_buf_read_alloc(struct comedi_async *async,
                                   unsigned int nbytes)
{
    unsigned int available;

    available = async->munge_count - async->buf_read_alloc_count;
    if (nbytes > available)
        nbytes = available;

    async->buf_read_alloc_count += nbytes;

    /*
     * ensure the async buffer 'counts' are read before we
     * attempt to read data from the read-alloc'ed buffer space
     */
    smp_rmb();

    return nbytes;
}
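munge_count is advanced on the acquisition/munge side behind an smp_wmb(), which is what the smp_rmb() above is documented to pair with. A hedged sketch of that writer side follows; the function name is illustrative and the comment stands in for the actual munging work.

/* Writer-side sketch: data is munged into the buffer first, the count last. */
static void comedi_buf_munge_sketch(struct comedi_async *async,
                                    unsigned int num_bytes)
{
    /* ... device data has been copied/munged into the async buffer ... */

    /* The munged bytes must be visible before the count that exposes them;
     * pairs with the smp_rmb() in comedi_buf_read_alloc(). */
    smp_wmb();
    async->munge_count += num_bytes;
}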
Example 9: dev_mce_log
static int dev_mce_log(struct notifier_block *nb, unsigned long val,
                       void *data)
{
    struct mce *mce = (struct mce *)data;
    unsigned int next, entry;

    wmb();
    for (;;) {
        entry = mce_log_get_idx_check(mcelog.next);
        for (;;) {
            /*
             * When the buffer fills up discard new entries.
             * Assume that the earlier errors are the more
             * interesting ones:
             */
            if (entry >= MCE_LOG_LEN) {
                set_bit(MCE_OVERFLOW,
                        (unsigned long *)&mcelog.flags);
                return NOTIFY_OK;
            }
            /* Old left over entry. Skip: */
            if (mcelog.entry[entry].finished) {
                entry++;
                continue;
            }
            break;
        }
        smp_rmb();
        next = entry + 1;
        if (cmpxchg(&mcelog.next, entry, next) == entry)
            break;
    }

    memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
    wmb();
    mcelog.entry[entry].finished = 1;
    wmb();

    /* wake processes polling /dev/mcelog */
    wake_up_interruptible(&mce_chrdev_wait);

    return NOTIFY_OK;
}
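On the consuming end, the /dev/mcelog reader only copies a slot once its finished flag is set, and it issues a read barrier before touching the record, mirroring the wmb() calls above. The following is a hedged sketch of that reader step; the helper is invented and simplified, while the real read path copies whole batches to user space.

/* Reader-side sketch: wait for 'finished', then read the record in order. */
static int mcelog_read_slot_sketch(unsigned int i, struct mce *out)
{
    if (!mcelog.entry[i].finished)
        return -EAGAIN;         /* writer has not completed this slot yet */

    smp_rmb();                  /* pairs with the wmb() before 'finished = 1' */
    memcpy(out, &mcelog.entry[i], sizeof(*out));
    return 0;
}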
Example 10: virtqueue_num_heads
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
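The writer here is the guest's virtio driver: it fills the descriptors, places the head index into the available ring, and only then advances avail->idx behind a write barrier, which is exactly what the smp_rmb() above defends against. A hedged sketch of that publish step follows; the field names follow the standard split-ring layout, not the libvhost-user or QEMU sources.

/* Guest-driver-side sketch: descriptors and ring entry first, then the index. */
static void vring_publish_sketch(struct vring *vring, uint16_t head)
{
    uint16_t avail_idx = vring->avail->idx;

    vring->avail->ring[avail_idx % vring->num] = head;

    /* Descriptor and ring-entry stores must not pass the index update;
     * pairs with the smp_rmb() in virtqueue_num_heads(). */
    smp_wmb();
    vring->avail->idx = avail_idx + 1;
}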
Example 11: mce_log
void mce_log(struct mce *mce)
{
    unsigned next, entry;
    int ret = 0;

    trace_mce_record(mce);

    ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
    if (ret == NOTIFY_STOP)
        return;

    mce->finished = 0;
    wmb();
    for (;;) {
        entry = rcu_dereference_check_mce(mcelog.next);
        for (;;) {
            if (entry >= MCE_LOG_LEN) {
                set_bit(MCE_OVERFLOW,
                        (unsigned long *)&mcelog.flags);
                return;
            }
            if (mcelog.entry[entry].finished) {
                entry++;
                continue;
            }
            break;
        }
        smp_rmb();
        next = entry + 1;
        if (cmpxchg(&mcelog.next, entry, next) == entry)
            break;
    }
    memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
    wmb();
    mcelog.entry[entry].finished = 1;
    wmb();

    mce->finished = 1;
    set_bit(0, &mce_need_notify);
}
Example 12: sti_boot_secondary
int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * The secondary processor is waiting to be released from
     * the holding pen - release it, then wait for it to flag
     * that it has been released by resetting pen_release.
     *
     * Note that "pen_release" is the hardware CPU ID, whereas
     * "cpu" is Linux's internal ID.
     */
    write_pen_release(cpu_logical_map(cpu));

    /*
     * Send the secondary CPU a soft interrupt, thereby causing
     * it to jump to the secondary entrypoint.
     */
    arch_send_wakeup_ipi_mask(cpumask_of(cpu));

    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }

    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}
Example 13: acpi_aml_readb_kern
static int acpi_aml_readb_kern(void)
{
    int ret;
    struct circ_buf *crc = &acpi_aml_io.in_crc;
    char *p;

    ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
    if (ret < 0)
        return ret;

    /* sync head before removing cmds */
    smp_rmb();
    p = &crc->buf[crc->tail];
    ret = (int)*p;

    /* sync tail before inserting cmds */
    smp_mb();
    crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);

    acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
    return ret;
}
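This is the canonical circ_buf consumer: sync the head with smp_rmb() before reading the data, sync the tail with a full barrier before freeing the slot. The sketch below shows the matching producer for the same FIFO, hedged and simplified to a single byte; the real write path also checks free space and takes the FIFO lock.

/* Producer-side sketch: store the byte, then publish the new head. */
static void acpi_aml_writeb_sketch(struct circ_buf *crc, char c)
{
    crc->buf[crc->head] = c;

    /* sync content before head update: the reader's smp_rmb() relies on
     * seeing the data once it sees the advanced head */
    smp_wmb();
    crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
}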
Example 14: __rcu_process_callbacks
/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                    struct rcu_data *rdp)
{
    if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
        *rdp->donetail = rdp->curlist;
        rdp->donetail = rdp->curtail;
        rdp->curlist = NULL;
        rdp->curtail = &rdp->curlist;
    }

    local_irq_disable();
    if (rdp->nxtlist && !rdp->curlist) {
        rdp->curlist = rdp->nxtlist;
        rdp->curtail = rdp->nxttail;
        rdp->nxtlist = NULL;
        rdp->nxttail = &rdp->nxtlist;
        local_irq_enable();

        /*
         * start the next batch of callbacks
         */

        /* determine batch number */
        rdp->batch = rcp->cur + 1;
        /* see the comment and corresponding wmb() in
         * the rcu_start_batch()
         */
        smp_rmb();

        if (!rcp->next_pending) {
            /* and start it/schedule start if it's a new batch */
            spin_lock(&rcp->lock);
            rcp->next_pending = 1;
            rcu_start_batch(rcp);
            spin_unlock(&rcp->lock);
        }
    } else {
        local_irq_enable();
    }

    rcu_check_quiescent_state(rcp, rdp);
    if (rdp->donelist)
        rcu_do_batch(rdp);
}
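The comment above points at the smp_wmb() in rcu_start_batch(): next_pending must be cleared, and that store made visible, before cur is advanced, so a reader cannot observe the new cur together with a stale next_pending. A hedged sketch of that ordering follows, reduced to the two stores involved; the real function also prepares the cpumask for the new batch.

/* Grace-period-start sketch: clear next_pending before exposing the new cur. */
static void rcu_start_batch_sketch(struct rcu_ctrlblk *rcp)
{
    if (rcp->next_pending && rcp->completed == rcp->cur) {
        rcp->next_pending = 0;

        /* next_pending == 0 must be visible in __rcu_process_callbacks()
         * before it can see the new value of cur; pairs with its smp_rmb(). */
        smp_wmb();
        rcp->cur++;
    }
}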
Example 15: versatile_boot_secondary
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * Set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * This is really belt and braces; we hold unintended secondary
     * CPUs in the holding pen until we're ready for them. However,
     * since we haven't sent them a soft interrupt, they shouldn't
     * be there.
     */
    write_pen_release(cpu_logical_map(cpu));

    /*
     * Send the secondary CPU a soft interrupt, thereby causing
     * the boot monitor to read the system wide flags register,
     * and branch to the address found there.
     */
    arch_send_wakeup_ipi_mask(cpumask_of(cpu));

    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }

    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}