本文整理汇总了C++中put_cpu函数的典型用法代码示例。如果您正苦于以下问题:C++ put_cpu函数的具体用法?C++ put_cpu怎么用?C++ put_cpu使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了put_cpu函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: vfp_sync_hwstate
/*
 * Synchronise the saved VFP context of @thread with the hardware.
 *
 * If @thread currently owns the VFP hardware on this CPU, its live
 * register state is written back into thread->vfpstate so software
 * reads of the saved context see current values.
 *
 * get_cpu()/put_cpu() disable preemption so vfp_current_hw_state[cpu]
 * cannot change underneath us while we test and save it.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
/*
 * If the thread we're interested in is the current owner of the
 * hardware VFP state, then we need to save its state.
 */
if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
/*
 * Save the last VFP state on this CPU. The save requires the
 * VFP to be enabled (FPEXC_EN); the original FPEXC value is
 * restored afterwards so the enable state is unchanged.
 */
fmxr(FPEXC, fpexc | FPEXC_EN);
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
fmxr(FPEXC, fpexc);
}
put_cpu();
}
示例2: od_init
/*
 * od_init() - allocate and initialise the tunables of the "ondemand"
 * cpufreq governor.
 * @dbs_data: governor-wide data; receives the tuners pointer and the
 *            minimum sampling rate.
 * @notify:   part of the governor init interface; unused here.
 *
 * Probes whether idle micro-accounting is available on this CPU
 * (get_cpu_idle_time_us() returns -1ULL when it is not) and selects
 * finer or coarser thresholds accordingly.
 *
 * Return: 0 on success, -ENOMEM if the tuners cannot be allocated.
 */
static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;	/* kzalloc() already warns on failure; no extra pr_err */

	cpu = get_cpu();	/* disable preemption so "cpu" stays valid for the query */
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();
	dbs_data->tuners = tuners;
	return 0;
}
示例3: handle_IPI
/*
 * handle_IPI() - interrupt handler for ia64 inter-processor interrupts.
 * @irq:    interrupt number (unused).
 * @dev_id: device cookie (unused).
 *
 * Atomically fetches and clears this CPU's pending-IPI bitmask (xchg)
 * and dispatches each set bit.  IPIs that arrive while we are running
 * simply set bits again and are picked up by the outer loop.
 *
 * Return: IRQ_HANDLED unconditionally.
 */
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();	/* pin to this CPU while using per-CPU data */
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);	/* index of lowest set bit */
			/*
			 * Use 1UL so the shift happens in unsigned long
			 * width; "1 << which" is an int shift, which is
			 * undefined for which >= 32 on this 64-bit mask.
			 */
			ops &= ~(1UL << which);

			switch (which) {
			case IPI_CALL_FUNC:
				handle_call_data();
				break;
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}
示例4: msm_spm_smp_set_vdd
/*
 * SMP cross-call handler: program the voltage level carried in @data
 * (a struct msm_spm_vdd_info) into the target SPM device and record
 * the result in info->err.
 *
 * In L2/APCS-master mode the shared L2 SPM is used; otherwise the
 * per-CPU SPM device for the CPU named in the request is used.
 */
static void msm_spm_smp_set_vdd(void *data)
{
struct msm_spm_device *dev;
struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;
if (msm_spm_L2_apcs_master)
dev = &msm_spm_l2_device;
else
dev = &per_cpu(msm_cpu_spm_device, info->cpu);
/* Device not yet initialised: nothing to program. (This return is
 * before get_cpu(), so the get/put pair below stays balanced.) */
if (!dev->initialized)
return;
/* Disable preemption while writing the shared L2 SPM registers;
 * only needed in master mode, where the device is not per-CPU. */
if (msm_spm_L2_apcs_master)
get_cpu();
dev->cpu_vdd = info->vlevel;
info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
if (msm_spm_L2_apcs_master)
put_cpu();
}
示例5: ovs_flow_stats_clear
/*
 * Reset the statistics of @flow to zero.
 *
 * For non-per-CPU stats only the single shared entry is cleared.  For
 * per-CPU stats every possible CPU's entry is cleared; bottom halves
 * are disabled only around the current CPU's entry, since that is the
 * one a softirq on this CPU could be updating concurrently.
 * get_cpu()/put_cpu() keep us on one CPU so cur_cpu stays accurate.
 */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
int cpu, cur_cpu;
if (!flow->stats.is_percpu) {
stats_reset(flow->stats.stat);
} else {
cur_cpu = get_cpu();
for_each_possible_cpu(cpu) {
if (cpu == cur_cpu)
local_bh_disable();
stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
if (cpu == cur_cpu)
local_bh_enable();
}
put_cpu();
}
}
示例6: sys_ioperm
/*
* this changes the io permissions bitmap in the current task.
*/
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
struct thread_struct * t = ¤t->thread;
struct tss_struct * tss;
unsigned long *bitmap;
if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
return -EINVAL;
if (turn_on && !capable(CAP_SYS_RAWIO))
return -EPERM;
/*
* If it's the first ioperm() call in this thread's lifetime, set the
* IO bitmap up. ioperm() is much less timing critical than clone(),
* this is why we delay this operation until now:
*/
if (!t->io_bitmap_ptr) {
bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
if (!bitmap)
return -ENOMEM;
memset(bitmap, 0xff, IO_BITMAP_BYTES);
t->io_bitmap_ptr = bitmap;
}
/*
* do it in the per-thread copy and in the TSS ...
*/
set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
tss = init_tss + get_cpu();
if (tss->io_bitmap_base == IO_BITMAP_OFFSET) { /* already active? */
set_bitmap(tss->io_bitmap, from, num, !turn_on);
} else {
memcpy(tss->io_bitmap, t->io_bitmap_ptr, IO_BITMAP_BYTES);
tss->io_bitmap_base = IO_BITMAP_OFFSET; /* Activate it in the TSS */
}
put_cpu();
return 0;
}
示例7: profile_flip_buffers
/*
 * Flip every CPU's profiling hit buffer and drain the retired buffers.
 *
 * The current flip index is sampled from this CPU, then all CPUs are
 * asked to switch buffers via on_each_cpu().  After the flip, the old
 * (index j) buffers are no longer written, so they can be walked
 * safely: each hit is folded into the global prof_buffer and the slot
 * is zeroed for reuse.  profile_flip_mutex serialises concurrent
 * flippers.
 */
static void profile_flip_buffers(void)
{
int i, j, cpu;
down(&profile_flip_mutex);
/* Sample this CPU's flip index; put_cpu() immediately — we only
 * needed a stable CPU id for the per_cpu() read. */
j = per_cpu(cpu_profile_flip, get_cpu());
put_cpu();
on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
for_each_online_cpu(cpu) {
struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
for (i = 0; i < NR_PROFILE_HIT; ++i) {
if (!hits[i].hits) {
/* Slot has a stale pc but no accumulated hits: just clear it. */
if (hits[i].pc)
hits[i].pc = 0;
continue;
}
atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
hits[i].hits = hits[i].pc = 0;
}
}
up(&profile_flip_mutex);
}
示例8: vfp_flush_hwstate
/*
 * Invalidate @thread's ownership of this CPU's VFP hardware state.
 *
 * If @thread is the current owner, VFP access is disabled and the
 * ownership pointer cleared so the thread's next VFP use reloads its
 * saved context from memory.  Note: nothing is saved here — contrast
 * with vfp_sync_hwstate().
 * Preemption is disabled (get_cpu/put_cpu) so last_VFP_context[cpu]
 * cannot change underneath us.
 */
void vfp_flush_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
/*
 * If the thread we're interested in is the current owner of the
 * hardware VFP state, invalidate that ownership.  (The original
 * comment said "save its state", but this function only discards.)
 */
if (last_VFP_context[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
fmxr(FPEXC, fpexc & ~FPEXC_EN);	/* disable VFP access */
/*
 * Set the context to NULL to force a reload the next time
 * the thread uses the VFP.
 */
last_VFP_context[cpu] = NULL;
}
put_cpu();
}
示例9: exit_thread
/*
 * Free current thread data structures etc..
 *
 * If ioperm() allocated a per-thread I/O permission bitmap, detach it
 * from the thread, reset this CPU's TSS copy to "all ports denied"
 * (0xff), and only then free the buffer — the TSS must stop
 * referencing the data before kfree().  Preemption is disabled across
 * the TSS update so we stay on the CPU whose TSS we edit.  Finally
 * the FPU state is dropped.
 */
void exit_thread(void)
{
struct task_struct *me = current;
struct thread_struct *t = &me->thread;
unsigned long *bp = t->io_bitmap_ptr;
struct fpu *fpu = &t->fpu;
if (bp) {
/* get_cpu() pins us; per_cpu(cpu_tss, ...) is this CPU's TSS. */
struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
/*
 * Careful, clear this in the TSS too:
 */
memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
t->io_bitmap_max = 0;
put_cpu();
kfree(bp);	/* safe now: TSS no longer refers to the bitmap */
}
fpu__drop(fpu);
}
示例10: exit_thread
/*
 * Free current thread data structures etc..
 *
 * i386 variant: if the task has an I/O permission bitmap (signalled by
 * TIF_IO_BITMAP), free it and scrub every reference to it — both the
 * thread's pointer and this CPU's TSS state, including the bitmap
 * owner and the active offset.  Preemption is disabled across the TSS
 * update so we edit the TSS of the CPU we are running on.
 */
void exit_thread(void)
{
/* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
struct task_struct *tsk = current;
struct thread_struct *t = &tsk->thread;
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
/*
 * Careful, clear this in the TSS too:
 */
memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);	/* 0xff == all ports denied */
t->io_bitmap_max = 0;
tss->io_bitmap_owner = NULL;
tss->io_bitmap_max = 0;
tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;	/* deactivate in the TSS */
put_cpu();
}
}
示例11: ovs_flow_stats_clear
/*
 * Zero the per-CPU statistics of @flow.
 *
 * Each CPU's entry is cleared under its spinlock.  Bottom halves are
 * disabled only while clearing the current CPU's entry, since that is
 * the one a local softirq could be updating concurrently.
 * get_cpu()/put_cpu() keep us pinned so the "local CPU" test stays
 * valid for the whole walk.
 */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int i;
	int this_cpu = get_cpu();

	for_each_possible_cpu(i) {
		struct sw_flow_stats *s = &flow->stats[i];
		bool local = (i == this_cpu);

		if (local)
			local_bh_disable();

		spin_lock(&s->lock);
		s->used = 0;
		s->packet_count = 0;
		s->byte_count = 0;
		s->tcp_flags = 0;
		spin_unlock(&s->lock);

		if (local)
			local_bh_enable();
	}
	put_cpu();
}
示例12: vfp_pm_save_context
/*
 * Save the live VFP context ahead of a power-management transition.
 *
 * If this CPU currently holds a thread's VFP state, enable the VFP if
 * necessary, save the registers into that context, then disable the
 * VFP and drop the ownership pointer so the state is reloaded after
 * resume.  Preemption is disabled (get_cpu/put_cpu) while the per-CPU
 * owner pointer and hardware registers are touched.
 */
void vfp_pm_save_context(void)
{
u32 fpexc = fmrx(FPEXC);
unsigned int cpu = get_cpu();
/* Save last_VFP_context if needed */
if (last_VFP_context[cpu]) {
/* Enable vfp to save context */
if (!(fpexc & FPEXC_EN)) {
vfp_enable(NULL);
fmxr(FPEXC, fpexc | FPEXC_EN);
}
vfp_save_state(last_VFP_context[cpu], fpexc);
/* disable, just in case */
fmxr(FPEXC, fpexc & ~FPEXC_EN);
/* force a reload after resume */
last_VFP_context[cpu] = NULL;
}
put_cpu();
}
示例13: set_tls_desc
static void set_tls_desc(struct task_struct *p, int idx,
const struct user_desc *info, int n)
{
struct thread_struct *t = &p->thread;
struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
int cpu;
cpu = get_cpu();
while (n-- > 0) {
if (LDT_empty(info))
desc->a = desc->b = 0;
else
fill_ldt(desc, info);
++info;
++desc;
}
if (t == ¤t->thread)
load_TLS(t, cpu);
put_cpu();
}
示例14: msm_idle_stats_timer
/*
 * hrtimer callback for the MSM idle-statistics collector.
 *
 * Classifies why the timer fired (migrated to another CPU, busy timer
 * expired, or collection timer expired), records the event in the
 * device's stats, clears the "collecting" flag, and wakes any reader
 * blocked on wait_q.  Always returns HRTIMER_NORESTART — the timer is
 * one-shot.
 */
static enum hrtimer_restart msm_idle_stats_timer(struct hrtimer *timer)
{
struct msm_idle_stats_device *stats_dev;
unsigned int cpu;
int64_t now;
int64_t interval;
stats_dev = container_of(timer, struct msm_idle_stats_device, timer);
cpu = get_cpu();	/* pin so the migration check below is meaningful */
/* Timer fired on a CPU other than the one being measured: record
 * the migration and skip interval classification. */
if (cpu != stats_dev->cpu) {
if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_MIGRATION)
pr_info("%s: timer migrated from cpu%u to cpu%u\n",
__func__, stats_dev->cpu, cpu);
stats_dev->stats.event = MSM_IDLE_STATS_EVENT_TIMER_MIGRATED;
goto timer_exit;
}
now = ktime_to_us(ktime_get());
interval = now - stats_dev->stats.last_busy_start;
/* "- 1" tolerates one microsecond of timer slack when deciding
 * whether the busy timer (rather than the collection timer) fired. */
if (stats_dev->stats.busy_timer > 0 &&
interval >= stats_dev->stats.busy_timer - 1)
stats_dev->stats.event =
MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
else
stats_dev->stats.event =
MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;
timer_exit:
atomic_set(&stats_dev->collecting, 0);
wake_up_interruptible(&stats_dev->wait_q);
put_cpu();
return HRTIMER_NORESTART;
}
示例15: check_sal_cache_flush
/*
 * Probe whether SAL_CACHE_FLUSH loses pending interrupts on this
 * platform.
 *
 * Arms the CPU-local timer to fire almost immediately, waits for the
 * interrupt to become pending, then calls SAL_CACHE_FLUSH.  If the
 * interrupt is still pending afterwards it is consumed normally;
 * otherwise the firmware dropped it, and the global flag is set so
 * PAL_CACHE_FLUSH is used instead.  Runs with local interrupts off
 * and preemption disabled so the IRR test targets this CPU's timer.
 */
static void __init
check_sal_cache_flush (void)
{
unsigned long flags;
int cpu;
u64 vector;
cpu = get_cpu();
local_irq_save(flags);
/*
 * Schedule a timer interrupt, wait until it's reported, and see if
 * SAL_CACHE_FLUSH drops it.
 */
ia64_set_itv(IA64_TIMER_VECTOR);
ia64_set_itm(ia64_get_itc() + 1000);
while (!ia64_get_irr(IA64_TIMER_VECTOR))
cpu_relax();
ia64_sal_cache_flush(3);
if (ia64_get_irr(IA64_TIMER_VECTOR)) {
/* Interrupt survived the flush: consume it and sanity-check
 * that it really was our timer vector. */
vector = ia64_get_ivr();
ia64_eoi();
WARN_ON(vector != IA64_TIMER_VECTOR);
} else {
sal_cache_flush_drops_interrupts = 1;
printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
"PAL_CACHE_FLUSH will be used instead\n");
ia64_eoi();
}
local_irq_restore(flags);
put_cpu();
}