

C++ cpumask_and Function Code Examples

This article collects typical usage examples of the cpumask_and function in C++. If you have been wondering what cpumask_and does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


A total of 15 cpumask_and code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples. A short usage sketch precedes the examples.
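
Before the individual examples, here is a minimal sketch of the pattern they all share. The helper below is hypothetical (it is not taken from any of the projects listed): cpumask_and(dst, src1, src2) stores the bitwise intersection of the two source masks in dst and returns true when at least one bit remains set, so the call can be used directly as a condition.

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical helper: restrict a requested CPU set to CPUs that are online. */
static int pick_online_target(const struct cpumask *requested,
                              struct cpumask *result)
{
        /*
         * cpumask_and() writes (requested & cpu_online_mask) into *result
         * and returns true if the intersection is non-empty.
         */
        if (!cpumask_and(result, requested, cpu_online_mask))
                return -EINVAL; /* no online CPU in the requested set */

        /* Pick any CPU from the intersection, e.g. as an IPI target. */
        return cpumask_any(result);
}

The boolean return value is what lets Example 5 (cpudl_find) and Example 15 (idu_irq_set_affinity) compute the intersection and test it for emptiness in a single call.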

Example 1: tick_do_periodic_broadcast

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	raw_spin_unlock(&tick_broadcast_lock);
}
Developer ID: gitter-badger, Project: ARRR, Lines of code: 14, Source file: tick-broadcast.c

Example 2: pcrypt_init_padata

static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}
Developer ID: AlexShiLucky, Project: linux, Lines of code: 55, Source file: pcrypt.c

Example 3: setup_affinity

/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
Developer ID: ReflexBow, Project: android_kernel_motorola_ghost, Lines of code: 43, Source file: manage.c

Example 4: cpuidle_wakeup_mwait

void cpuidle_wakeup_mwait(cpumask_t *mask)
{
    cpumask_t target;
    unsigned int cpu;

    cpumask_and(&target, mask, &cpuidle_mwait_flags);

    /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
    for_each_cpu(cpu, &target)
        mwait_wakeup(cpu) = 0;

    cpumask_andnot(mask, mask, &target);
}
Developer ID: Ultra-Seven, Project: gxen, Lines of code: 13, Source file: cpu_idle.c

Example 5: cpudl_find

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask && cpumask_and(later_mask, cp->free_cpus,
			&p->cpus_allowed) && cpumask_and(later_mask,
			later_mask, cpu_active_mask)) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
Developer ID: AkyZero, Project: wrapfs-latest, Lines of code: 31, Source file: cpudeadline.c

Example 6: arch_get_hmp_domains

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if(!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = (struct hmp_domain *)
			kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = (struct hmp_domain *)
		kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
Developer ID: Fnoeoe, Project: android_kernel_samsung_grandprimevelte, Lines of code: 24, Source file: topology.c

Example 7: move_masked_irq

void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		cpumask_and(desc->affinity,
			    desc->pending_mask, cpu_online_mask);
		desc->chip->set_affinity(irq, desc->affinity);
	}
	cpumask_clear(desc->pending_mask);
}
Developer ID: antonywcl, Project: AR-5315u_PLD, Lines of code: 45, Source file: migration.c

Example 8: irq_complete_move

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
Developer ID: Blackburn29, Project: PsycoKernel, Lines of code: 18, Source file: irq_ia64.c

Example 9: find_unassigned_vector

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
Developer ID: Blackburn29, Project: PsycoKernel, Lines of code: 18, Source file: irq_ia64.c

Example 10: flush_tlb_others

/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Temporarily this turns IRQs off, so that lockups are detected by the
	 * NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Developer ID: 007kumarraja, Project: rockchip-rk3188-mk908, Lines of code: 50, Source file: tlb-smp.c

Example 11: __clear_irq_vector

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}
Developer ID: Blackburn29, Project: PsycoKernel, Lines of code: 19, Source file: irq_ia64.c

Example 12: rt_dump_vcpu

/*
 * Debug related code, dump vcpu/cpu information
 */
static void
rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
{
    cpumask_t *cpupool_mask, *mask;

    ASSERT(svc != NULL);
    /* idle vcpu */
    if( svc->sdom == NULL )
    {
        printk("\n");
        return;
    }

    /*
     * We can't just use 'cpumask_scratch' because the dumping can
     * happen from a pCPU outside of this scheduler's cpupool, and
     * hence it's not right to use the pCPU's scratch mask (which
     * may even not exist!). On the other hand, it is safe to use
     * svc->vcpu->processor's own scratch space, since we hold the
     * runqueue lock.
     */
    mask = _cpumask_scratch[svc->vcpu->processor];

    cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
    cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
    printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
           " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
           " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
            svc->vcpu->domain->domain_id,
            svc->vcpu->vcpu_id,
            svc->vcpu->processor,
            svc->period,
            svc->budget,
            svc->cur_budget,
            svc->cur_deadline,
            svc->last_start,
            __vcpu_on_q(svc),
            vcpu_runnable(svc->vcpu),
            svc->flags,
            keyhandler_scratch);
}
Developer ID: rosslagerwall, Project: xen, Lines of code: 45, Source file: sched_rt.c

Example 13: SYSCALL_DEFINE3

SYSCALL_DEFINE3(fairsched_cpumask, unsigned int, id, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	struct cgroup *cgrp;
	int retval;
	cpumask_var_t new_mask, in_mask;

	if (!capable_setveid())
		return -EPERM;

	if (id == 0)
		return -EINVAL;

	cgrp = fairsched_open(id);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if (!alloc_cpumask_var(&in_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_in_mask;
	}

	retval = get_user_cpu_mask(user_mask_ptr, len, in_mask);
	if (retval == 0) {
		cpumask_and(new_mask, in_mask, cpu_active_mask);
		cgroup_lock();
		retval = cgroup_set_cpumask(cgrp, new_mask);
		cgroup_unlock();
	}

	free_cpumask_var(new_mask);

out_free_in_mask:
	free_cpumask_var(in_mask);
out:
	cgroup_kernel_close(cgrp);
	return retval;
}
Developer ID: vps2fast, Project: openvz-kernel, Lines of code: 42, Source file: fairsched.c

Example 14: flush_area_mask

void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
Developer ID: CrazyXen, Project: XEN_CODE, Lines of code: 20, Source file: smp.c

Example 15: idu_irq_set_affinity

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
    unsigned long flags;
    cpumask_t online;

    /* errout if no online cpu per @cpumask */
    if (!cpumask_and(&online, cpumask, cpu_online_mask))
        return -EINVAL;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
    idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);

    return IRQ_SET_MASK_OK;
}
Developer ID: causten, Project: linux, Lines of code: 20, Source file: mcip.c
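
A recurring design choice across these examples is where the destination mask lives: Examples 4, 8-11, and 15 use an on-stack (or struct-embedded) cpumask_t, while Examples 2 and 13 allocate a cpumask_var_t with alloc_cpumask_var(), which keeps stack frames small when NR_CPUS is large. Below is a minimal, hypothetical sketch of the allocated variant (function name and logging are illustrative, not from any project above):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical example: log the CPUs that are both possible and online. */
static int snapshot_online_cpus(void)
{
        cpumask_var_t tmp;
        unsigned int cpu;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        /* Same intersection as computed in Example 2 (pcrypt_init_padata). */
        cpumask_and(tmp, cpu_possible_mask, cpu_online_mask);

        for_each_cpu(cpu, tmp)
                pr_info("cpu %u is possible and online\n", cpu);

        free_cpumask_var(tmp);
        return 0;
}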


Note: The cpumask_and function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution and use should follow the license of the corresponding project. Do not reproduce without permission.