This article collects typical usage examples of the write_seqlock function in C (as used in the Linux kernel). If you are unsure what write_seqlock does, how it is called, or what real-world usage looks like, the curated examples below should help.
The following shows 15 code examples of write_seqlock, ordered by popularity by default.
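Before the examples, a quick refresher: write_seqlock()/write_sequnlock() bracket the writer side of a Linux seqlock. The writer bumps a sequence counter on entry and on exit, so the count is odd while a write is in flight; readers sample the counter with read_seqbegin(), copy the data, and retry via read_seqretry() if the counter was odd or has changed. Below is a minimal userspace sketch of that idea in C11; seq_t and the seq_* names are inventions for the demo, not the kernel API, and the sketch glosses over the data-race formalities a production implementation must address.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical userspace analogue of a seqlock (single writer). */
typedef struct {
    atomic_uint seq; /* even: idle; odd: writer active */
} seq_t;

static unsigned shared_a, shared_b; /* data "protected" by the seqlock */

static void seq_write_lock(seq_t *s)
{
    /* The kernel version also takes a spinlock to serialize writers;
     * with a single writer the counter alone suffices. */
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
}

static void seq_write_unlock(seq_t *s)
{
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static unsigned seq_read_begin(seq_t *s)
{
    return atomic_load_explicit(&s->seq, memory_order_acquire);
}

static int seq_read_retry(seq_t *s, unsigned start)
{
    /* Retry if a writer was active (odd) or ran meanwhile (changed). */
    return (start & 1) ||
           atomic_load_explicit(&s->seq, memory_order_acquire) != start;
}

int main(void)
{
    seq_t lock = { 0 };
    unsigned a, b, start;

    seq_write_lock(&lock);
    shared_a = 1;
    shared_b = 2;
    seq_write_unlock(&lock);

    do {
        start = seq_read_begin(&lock);
        a = shared_a;
        b = shared_b;
    } while (seq_read_retry(&lock, start));

    printf("a=%u b=%u\n", a, b);
    return 0;
}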
Example 1: timer_interrupt
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
    unsigned long next;

    next = get_linux_timer();

again:
    while ((signed long)(get_ccount() - next) > 0) {
        profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
        update_process_times(user_mode(get_irq_regs()));
#endif
        write_seqlock(&xtime_lock);
        do_timer(1); /* Linux handler in kernel/timer.c */

        /* Note that writing CCOMPARE clears the interrupt. */
        next += CCOUNT_PER_JIFFY;
        set_linux_timer(next);
        write_sequnlock(&xtime_lock);
    }

    /* Allow platform to do something useful (Wdog). */
    platform_heartbeat();

    /* Make sure we didn't miss any tick... */
    if ((signed long)(get_ccount() - next) > 0)
        goto again;

    return IRQ_HANDLED;
}
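A detail worth noting in this example: the (signed long)(get_ccount() - next) > 0 test is the standard wraparound-safe idiom for "has the cycle counter passed next?". Unsigned subtraction followed by a signed comparison gives the right answer across counter overflow, as long as the two values are less than half the counter range apart. A tiny standalone illustration (names invented for the demo):

#include <stdint.h>
#include <stdio.h>

/* "Has 'now' reached or passed 'deadline'?", correct across 32-bit
 * wraparound while the distance stays below 2^31 ticks. */
static int counter_after_eq(uint32_t now, uint32_t deadline)
{
    return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
    uint32_t deadline = 0x00000010u; /* just past the wrap point */

    printf("%d\n", counter_after_eq(0xfffffff0u, deadline)); /* 0: not yet */
    printf("%d\n", counter_after_eq(0x00000020u, deadline)); /* 1: passed */
    return 0;
}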
Example 2: tick_do_update_jiffies64
/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
    unsigned long ticks = 0;
    ktime_t delta;

    /*
     * Do a quick check without holding xtime_lock:
     */
    delta = ktime_sub(now, last_jiffies_update);
    if (delta.tv64 < tick_period.tv64)
        return;

    /* Reevaluate with xtime_lock held */
    write_seqlock(&xtime_lock);

    delta = ktime_sub(now, last_jiffies_update);
    if (delta.tv64 >= tick_period.tv64) {

        delta = ktime_sub(delta, tick_period);
        last_jiffies_update = ktime_add(last_jiffies_update,
                                        tick_period);

        /* Slow path for long timeouts */
        if (unlikely(delta.tv64 >= tick_period.tv64)) {
            s64 incr = ktime_to_ns(tick_period);

            ticks = ktime_divns(delta, incr);

            last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                               incr * ticks);
        }
        do_timer(++ticks);

        /* Keep the tick_next_period variable up to date */
        tick_next_period = ktime_add(last_jiffies_update, tick_period);
    }
    write_sequnlock(&xtime_lock);
}
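The structure of tick_do_update_jiffies64() is a classic check/recheck: a cheap unlocked test of delta filters out the common "tick not due" case, and the test is repeated after write_seqlock() because another CPU may have advanced last_jiffies_update in the meantime. A hedged userspace sketch of the same pattern with a plain mutex (all names invented):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t last_update_ns; /* protected by update_lock */

/* Advance last_update_ns by whole periods, at most once per call. */
static void maybe_update(int64_t now_ns, int64_t period_ns)
{
    /* Quick unlocked check: a stale read costs at worst a skipped
     * update or a wasted lock acquisition, never a wrong result. */
    if (now_ns - last_update_ns < period_ns)
        return;

    pthread_mutex_lock(&update_lock);
    /* Recheck under the lock: another thread may have won the race. */
    if (now_ns - last_update_ns >= period_ns)
        last_update_ns +=
            period_ns * ((now_ns - last_update_ns) / period_ns);
    pthread_mutex_unlock(&update_lock);
}

int main(void)
{
    maybe_update(2500, 1000);
    printf("last_update_ns = %lld\n", (long long)last_update_ns);
    return 0;
}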
Example 3: tmu_timer_interrupt
static irqreturn_t tmu_timer_interrupt(int irq, void *dev_id,
                                       struct pt_regs *regs)
{
    unsigned long timer_status;

    /* Clear UNF bit */
    timer_status = ctrl_inw(TMU0_TCR);
    timer_status &= ~0x100;
    ctrl_outw(timer_status, TMU0_TCR);

    /*
     * Here we are in the timer irq handler. We just have irqs locally
     * disabled, but we don't know if the timer_bh is running on another
     * CPU, so we need to avoid an SMP race with it. NOTE: we don't need
     * the irq version of write_lock because, as just said, we have irqs
     * locally disabled. -arca
     */
    write_seqlock(&xtime_lock);
    handle_timer_tick(regs);
    write_sequnlock(&xtime_lock);

    return IRQ_HANDLED;
}
Example 4: ebsa110_timer_interrupt
static irqreturn_t
ebsa110_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    u32 count;

    write_seqlock(&xtime_lock);

    /* latch and read timer 1 */
    __raw_writeb(0x40, PIT_CTRL);
    count = __raw_readb(PIT_T1);
    count |= __raw_readb(PIT_T1) << 8;
    count += COUNT;
    __raw_writeb(count & 0xff, PIT_T1);
    __raw_writeb(count >> 8, PIT_T1);

    timer_tick(regs);

    write_sequnlock(&xtime_lock);

    return IRQ_HANDLED;
}
Example 5: imx_timer_interrupt
/*
 * IRQ handler for the timer
 */
static irqreturn_t
imx_timer_interrupt(int irq, void *dev_id)
{
    uint32_t tstat;

    /* clear the interrupt */
    tstat = IMX_TSTAT(TIMER_BASE);
    IMX_TSTAT(TIMER_BASE) = 0;

    if (tstat & TSTAT_COMP) {
        do {
            write_seqlock(&xtime_lock);
            timer_tick();
            write_sequnlock(&xtime_lock);
            IMX_TCMP(TIMER_BASE) += evt_diff;
        } while (unlikely((int32_t)(IMX_TCMP(TIMER_BASE)
                          - IMX_TCN(TIMER_BASE)) < 0));
    }

    return IRQ_HANDLED;
}
Example 6: timer_interrupt
irqreturn_t timer_interrupt(int irq, void *dummy)
{
    /* last time the cmos clock got updated */
    static long last_rtc_update;

    write_seqlock(&xtime_lock);
    do_timer(1);

    profile_tick(CPU_PROFILING);

    /*
     * If we have an externally synchronized Linux clock, then update
     * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has
     * to be called as close as possible to 500 ms before the new
     * second starts.
     */
    if (ntp_synced() &&
        xtime.tv_sec > last_rtc_update + 660 &&
        (xtime.tv_nsec / NSEC_PER_USEC) >=
            500000 - ((unsigned)TICK_SIZE) / 2 &&
        (xtime.tv_nsec / NSEC_PER_USEC) <=
            500000 + ((unsigned)TICK_SIZE) / 2) {
        if (set_rtc_mmss(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
        else
            /* Do it again in 60 s. */
            last_rtc_update = xtime.tv_sec - 600;
    }
    write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
    update_process_times(user_mode(get_irq_regs()));
#endif

    return IRQ_HANDLED;
}
Example 7: seqlock_write_test
void *seqlock_write_test(void *arg)
{
    int i;
    int j;
    int me = (long)arg;
    long long n_writes_local = 0LL;

    run_on(me);
    atomic_inc(&nthreadsrunning);
    while (ACCESS_ONCE(goflag) == GOFLAG_INIT)
        poll(NULL, 0, 1);
    while (ACCESS_ONCE(goflag) == GOFLAG_RUN) {
        for (i = COUNT_UPDATE_RUN; i > 0; i--) {
            write_seqlock(&test_seqlock);
            for (j = 0; j < n_elems; j++)
                testarray[j]++;
            write_sequnlock(&test_seqlock);
            barrier();
        }
        n_writes_local += COUNT_UPDATE_RUN;
    }
    __get_thread_var(n_writes_pt) += n_writes_local;
    return NULL;
}
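This is the writer half of a perfbook-style seqlock stress test: every pass increments each element of testarray inside one write-side critical section, so a consistent snapshot must always be uniform. The page does not show the matching reader; what follows is a hedged reconstruction assuming the same harness (the seqlock primitives, run_on(), ACCESS_ONCE(), per-thread variables) that the writer uses, with COUNT_READ_RUN and n_reads_pt invented to mirror their write-side counterparts.

void *seqlock_read_test(void *arg)
{
    int i;
    int j;
    int me = (long)arg;
    int ok;
    unsigned long seq;
    long long n_reads_local = 0LL;

    run_on(me);
    atomic_inc(&nthreadsrunning);
    while (ACCESS_ONCE(goflag) == GOFLAG_INIT)
        poll(NULL, 0, 1);
    while (ACCESS_ONCE(goflag) == GOFLAG_RUN) {
        for (i = COUNT_READ_RUN; i > 0; i--) {
            do {
                seq = read_seqbegin(&test_seqlock);
                ok = 1;
                for (j = 1; j < n_elems; j++)
                    if (testarray[j] != testarray[0])
                        ok = 0;
            } while (read_seqretry(&test_seqlock, seq));
            assert(ok); /* a stable snapshot must be uniform */
        }
        n_reads_local += COUNT_READ_RUN;
    }
    __get_thread_var(n_reads_pt) += n_reads_local;
    return NULL;
}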
Example 8: route_del
int route_del(unsigned long addr)
{
    struct route_entry *rep;
    struct route_entry **repp;

    write_seqlock(&sl);
    repp = &route_list.re_next;
    for (;;) {
        rep = *repp;
        if (rep == NULL)
            break;
        if (rep->addr == addr) {
            *repp = rep->re_next;
            write_sequnlock(&sl);
            smp_mb();
            rep->re_freed = 1;
            free(rep);
            return 0;
        }
        repp = &rep->re_next;
    }
    write_sequnlock(&sl);
    return -ENOENT;
}
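Note the ordering in route_del(): the element is unlinked under the seqlock, but re_freed is set and free() is called only after write_sequnlock(), with an smp_mb() in between. A concurrent reader may therefore still be walking the element; the seqlock retry is what keeps it from returning stale data (and, as the perfbook text this snippet comes from shows, the re_freed flag catches readers that touch the element after the free). A hedged sketch of the matching lookup, modeled on that perfbook example (the iface field and ULONG_MAX failure value are assumptions):

unsigned long route_lookup(unsigned long addr)
{
    struct route_entry *rep;
    struct route_entry **repp;
    unsigned long ret;
    unsigned long s;

retry:
    s = read_seqbegin(&sl);
    repp = &route_list.re_next;
    do {
        rep = *repp;
        if (rep == NULL) {
            if (read_seqretry(&sl, s))
                goto retry; /* a writer ran; rescan */
            return ULONG_MAX; /* not found */
        }
        repp = &rep->re_next;
    } while (rep->addr != addr);
    if (rep->re_freed)
        abort(); /* use-after-free caught by the flag */
    ret = rep->iface;
    if (read_seqretry(&sl, s))
        goto retry; /* result may be stale; rescan */
    return ret;
}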
Example 9: timer_interrupt
/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    /*
     * Here we are in the timer irq handler. We just have irqs locally
     * disabled, but we don't know if the timer_bh is running on another
     * CPU, so we need to avoid an SMP race with it. NOTE: we don't need
     * the irq version of write_lock because, as just said, we have irqs
     * locally disabled. -arca
     */
    write_seqlock(&xtime_lock);

    cur_timer->mark_offset();

    do_timer_interrupt(irq, regs);

    write_sequnlock(&xtime_lock);

#ifdef CONFIG_X86_LOCAL_APIC
    if (using_apic_timer)
        smp_send_timer_broadcast_ipi(regs);
#endif

    return IRQ_HANDLED;
}
Example 10: timer_interrupt
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick.
 */
irqreturn_t timer_interrupt(int irq, void *dummy)
{
    /* last time the cmos clock got updated */
    static long last_rtc_update = 0;

    /* Clear the interrupt condition */
    outw(0, timer_membase + ALTERA_TIMER_STATUS_REG);
    nios2_timer_count += NIOS2_TIMER_PERIOD;

    write_seqlock(&xtime_lock);
    do_timer(1);
    profile_tick(CPU_PROFILING);

    /*
     * If we have an externally synchronized Linux clock, then update
     * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has
     * to be called as close as possible to 500 ms before the new
     * second starts.
     */
    if (ntp_synced() &&
        xtime.tv_sec > last_rtc_update + 660 &&
        (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 &&
        (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2) {
        if (set_rtc_mmss(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
        else
            last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
    }
    write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
    update_process_times(user_mode(get_irq_regs()));
#endif

    return IRQ_HANDLED;
}
Example 11: mark_offset_hpet
static void mark_offset_hpet(void)
{
    unsigned long long this_offset, last_offset;
    unsigned long offset;

    write_seqlock(&monotonic_lock);
    last_offset = ((unsigned long long)last_tsc_high << 32) | last_tsc_low;
    rdtsc(last_tsc_low, last_tsc_high);

    if (hpet_use_timer)
        offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
    else
        offset = hpet_readl(HPET_COUNTER);

    if (unlikely(((offset - hpet_last) >= (2 * hpet_tick)) &&
                 (hpet_last != 0))) {
        int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1;
        jiffies_64 += lost_ticks;
    }
    hpet_last = offset;

    /* update the monotonic base value */
    this_offset = ((unsigned long long)last_tsc_high << 32) | last_tsc_low;
    monotonic_base += cycles_2_ns(this_offset - last_offset);
    write_sequnlock(&monotonic_lock);
}
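The write-side critical section here exists to keep monotonic_base and the saved TSC pair consistent for the time-of-day readers. The cycles_2_ns() helper it calls was, in kernels of this vintage, a fixed-point multiply: ns = cycles * scale >> shift, with the scale precomputed from the measured CPU frequency. A hedged standalone sketch of that conversion (shift and scale values invented for a hypothetical 2 GHz clock):

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SHIFT 10 /* fixed-point fraction bits (illustrative) */

/* scale = nanoseconds-per-cycle << CYC2NS_SHIFT, precomputed once.
 * At 2 GHz: 0.5 ns/cycle -> 0.5 * 1024 = 512. */
static const uint64_t cyc2ns_scale = 512;

static uint64_t cycles_to_ns(uint64_t cycles)
{
    return (cycles * cyc2ns_scale) >> CYC2NS_SHIFT;
}

int main(void)
{
    /* Two billion cycles at 2 GHz should be one second. */
    printf("%llu ns\n",
           (unsigned long long)cycles_to_ns(2000000000ULL));
    return 0;
}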
Example 12: handle_timer_tick
/*
 * handle_timer_tick() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick.
 */
void handle_timer_tick(void)
{
    if (current->pid)
        profile_tick(CPU_PROFILING);

    /*
     * Here we are in the timer irq handler. We just have irqs locally
     * disabled, but we don't know if the timer_bh is running on another
     * CPU, so we need to avoid an SMP race with it. NOTE: we don't need
     * the irq version of write_lock because, as just said, we have irqs
     * locally disabled. -arca
     */
    write_seqlock(&xtime_lock);
    do_timer(1);

    /*
     * If we have an externally synchronized Linux clock, then update
     * the RTC clock accordingly every ~11 minutes. set_rtc_mmss() has
     * to be called as close as possible to 500 ms before the new
     * second starts.
     */
    if (ntp_synced() &&
        xtime.tv_sec > last_rtc_update + 660 &&
        (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 &&
        (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2) {
        if (rtc_sh_set_time(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
        else
            /* do it again in 60 s */
            last_rtc_update = xtime.tv_sec - 600;
    }
    write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
    update_process_times(user_mode(get_irq_regs()));
#endif
}
Example 13: afs_cell_purge
/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
    struct afs_cell *ws;

    _enter("");

    write_seqlock(&net->cells_lock);
    ws = rcu_access_pointer(net->ws_cell);
    RCU_INIT_POINTER(net->ws_cell, NULL);
    write_sequnlock(&net->cells_lock);
    afs_put_cell(net, ws);

    _debug("del timer");
    if (del_timer_sync(&net->cells_timer))
        atomic_dec(&net->cells_outstanding);

    _debug("kick mgr");
    afs_queue_cell_manager(net);

    _debug("wait");
    wait_var_event(&net->cells_outstanding,
                   !atomic_read(&net->cells_outstanding));
    _leave("");
}
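The shape of afs_cell_purge() is worth copying: the ws_cell pointer is snapshotted and cleared inside the write-side critical section, but afs_put_cell(), which may do real work, runs only after write_sequnlock(), keeping the critical section short and non-sleeping (a hard requirement for seqlock writers, since readers spin until the sequence count settles). A hedged generic sketch of this detach-under-lock, release-outside pattern, using a mutex and invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cell { int refcount; };

static pthread_mutex_t cells_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cell *ws_cell; /* protected by cells_lock */

/* Potentially slow: frees memory, might wake waiters, etc.
 * (Single-threaded demo; a real version needs an atomic refcount.) */
static void put_cell(struct cell *c)
{
    if (c && --c->refcount == 0)
        free(c);
}

static void purge_ws_cell(void)
{
    struct cell *ws;

    pthread_mutex_lock(&cells_lock);
    ws = ws_cell; /* detach under the lock... */
    ws_cell = NULL;
    pthread_mutex_unlock(&cells_lock);

    put_cell(ws); /* ...release outside it */
}

int main(void)
{
    ws_cell = calloc(1, sizeof(*ws_cell));
    ws_cell->refcount = 1;
    purge_ws_cell();
    printf("purged\n");
    return 0;
}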
Example 14: ipv4_local_port_range
/* Validate changes from the /proc interface. */
static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
{
    struct net *net =
        container_of(table->data, struct net, ipv4.ip_local_ports.range);
    int ret;
    int range[2];
    struct ctl_table tmp = {
        .data = &range,
        .maxlen = sizeof(range),
        .mode = table->mode,
        .extra1 = &ip_local_port_range_min,
        .extra2 = &ip_local_port_range_max,
    };

    inet_get_local_port_range(net, &range[0], &range[1]);

    ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

    if (write && ret == 0) {
        /* Ensure that the upper limit is not smaller than the lower,
         * and that the lower does not encroach upon the privileged
         * port limit.
         */
        if ((range[1] < range[0]) ||
            (range[0] < net->ipv4.sysctl_ip_prot_sock))
            ret = -EINVAL;
        else
            set_local_port_range(net, range);
    }

    return ret;
}

/* Validate changes from the /proc interface. */
static int ipv4_privileged_ports(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
{
    struct net *net = container_of(table->data, struct net,
                                   ipv4.sysctl_ip_prot_sock);
    int ret;
    int pports;
    int range[2];
    struct ctl_table tmp = {
        .data = &pports,
        .maxlen = sizeof(pports),
        .mode = table->mode,
        .extra1 = &ip_privileged_port_min,
        .extra2 = &ip_privileged_port_max,
    };

    pports = net->ipv4.sysctl_ip_prot_sock;

    ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

    if (write && ret == 0) {
        inet_get_local_port_range(net, &range[0], &range[1]);
        /* Ensure that the local port range doesn't overlap with the
         * privileged port range.
         */
        if (range[0] < pports)
            ret = -EINVAL;
        else
            net->ipv4.sysctl_ip_prot_sock = pports;
    }

    return ret;
}

static void inet_get_ping_group_range_table(struct ctl_table *table,
                                            kgid_t *low, kgid_t *high)
{
    kgid_t *data = table->data;
    struct net *net =
        container_of(table->data, struct net, ipv4.ping_group_range.range);
    unsigned int seq;

    do {
        seq = read_seqbegin(&net->ipv4.ping_group_range.lock);

        *low = data[0];
        *high = data[1];
    } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}

/* Update the system-visible ping group range */
static void set_ping_group_range(struct ctl_table *table,
                                 kgid_t low, kgid_t high)
{
    kgid_t *data = table->data;
    struct net *net =
        container_of(table->data, struct net, ipv4.ping_group_range.range);

    write_seqlock(&net->ipv4.ping_group_range.lock);
    data[0] = low;
    data[1] = high;
    write_sequnlock(&net->ipv4.ping_group_range.lock);
}
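This example shows the canonical cross-function pairing: set_ping_group_range() is the only writer, inet_get_ping_group_range_table() is the retry-loop reader, and the protected payload is deliberately tiny (two kgid_t values), so the writer's critical section is a handful of stores. A seqlock fits this sysctl well because updates are rare administrative events, while reads happen on hot socket paths that must never block.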
Example 15: hfi1_error_qp
/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
{
    struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
    struct ib_wc wc;
    int ret = 0;

    if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
        goto bail;

    qp->state = IB_QPS_ERR;

    if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
        qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
        del_timer(&qp->s_timer);
    }

    if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
        qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;

    write_seqlock(&dev->iowait_lock);
    if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
        qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
        list_del_init(&qp->s_iowait.list);
        if (atomic_dec_and_test(&qp->refcount))
            wake_up(&qp->wait);
    }
    write_sequnlock(&dev->iowait_lock);

    if (!(qp->s_flags & HFI1_S_BUSY)) {
        qp->s_hdrwords = 0;
        if (qp->s_rdma_mr) {
            hfi1_put_mr(qp->s_rdma_mr);
            qp->s_rdma_mr = NULL;
        }
        flush_tx_list(qp);
    }

    /* Schedule the sending tasklet to drain the send work queue. */
    if (qp->s_last != qp->s_head)
        hfi1_schedule_send(qp);

    clear_mr_refs(qp, 0);

    memset(&wc, 0, sizeof(wc));
    wc.qp = &qp->ibqp;
    wc.opcode = IB_WC_RECV;

    if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
        wc.wr_id = qp->r_wr_id;
        wc.status = err;
        hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
    }
    wc.status = IB_WC_WR_FLUSH_ERR;

    if (qp->r_rq.wq) {
        struct hfi1_rwq *wq;
        u32 head;
        u32 tail;

        spin_lock(&qp->r_rq.lock);

        /* sanity check pointers before trusting them */
        wq = qp->r_rq.wq;
        head = wq->head;
        if (head >= qp->r_rq.size)
            head = 0;
        tail = wq->tail;
        if (tail >= qp->r_rq.size)
            tail = 0;
        while (tail != head) {
            wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
            if (++tail >= qp->r_rq.size)
                tail = 0;
            hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wq->tail = tail;

        spin_unlock(&qp->r_rq.lock);
    } else if (qp->ibqp.event_handler) {
        ret = 1;
    }

bail:
    return ret;
}
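A final detail from the receive-queue flush above: the loop walks a circular buffer from tail to head, clamping both indices before trusting them, because wq lives in memory shared with userspace and could hold out-of-range values. A hedged standalone sketch of that bounded drain loop (structure and names invented for the demo):

#include <stdio.h>

#define QSIZE 8

struct ring {
    unsigned head; /* producer index */
    unsigned tail; /* consumer index */
    int wr_id[QSIZE];
};

/* Flush every pending entry, defending against corrupt indices. */
static void drain(struct ring *rq)
{
    unsigned head = rq->head;
    unsigned tail = rq->tail;

    /* sanity check the indices before trusting them */
    if (head >= QSIZE)
        head = 0;
    if (tail >= QSIZE)
        tail = 0;

    while (tail != head) {
        printf("flushing wr_id %d\n", rq->wr_id[tail]);
        if (++tail >= QSIZE)
            tail = 0;
    }
    rq->tail = tail;
}

int main(void)
{
    struct ring rq = { .head = 3, .tail = 0, .wr_id = { 10, 11, 12 } };

    drain(&rq);
    return 0;
}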