This article collects typical usage examples of the C++ function cpumask_bits: what cpumask_bits is for, how to call it, and what real call sites look like. The curated examples below should help answer those questions.
The following shows 15 code examples of cpumask_bits, sorted by popularity by default.
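Before diving in, note what cpumask_bits() itself does: it is an accessor that returns the struct cpumask's underlying array of unsigned long words, which is why the examples below can index it with [0] or pass it to raw bitmap helpers. A minimal userspace model of the idea (the struct layout and macro here are simplified stand-ins, not the kernel's actual definitions):

#include <limits.h>
#include <stdio.h>

#define MY_NR_CPUS 64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define MASK_WORDS ((MY_NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct my_cpumask { unsigned long bits[MASK_WORDS]; };

/* Model of cpumask_bits(): hand back the raw word array. */
#define my_cpumask_bits(m) ((m)->bits)

int main(void)
{
    struct my_cpumask mask = { { 0 } };

    my_cpumask_bits(&mask)[0] |= 1UL << 3;      /* mark CPU 3 */
    printf("word 0 = %#lx\n", my_cpumask_bits(&mask)[0]);
    return 0;
}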
Example 1: flush_remote
/*
* This wrapper function around hv_flush_remote() does several things:
*
* - Provides a return value error-checking panic path, since
* there's never any good reason for hv_flush_remote() to fail.
* - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
* is the type that Linux wants to pass around anyway.
* - Canonicalizes the arguments: a length of zero makes the corresponding cpumask NULL.
* - Handles deferring TLB flushes for dataplane tiles.
* - Tracks remote interrupts in the per-cpu irq_cpustat_t.
*
* Note that we have to wait until the cache flush completes before
* updating the per-cpu last_cache_flush word, since otherwise another
* concurrent flush can race, conclude the flush has already
* completed, and start to use the page while it's still dirty
* remotely (running concurrently with the actual evict, presumably).
*/
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
const struct cpumask *cache_cpumask_orig,
HV_VirtAddr tlb_va, unsigned long tlb_length,
unsigned long tlb_pgsize,
const struct cpumask *tlb_cpumask_orig,
HV_Remote_ASID *asids, int asidcount)
{
int rc;
struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
struct cpumask *cache_cpumask, *tlb_cpumask;
HV_PhysAddr cache_pa;
mb(); /* provided just to simplify "magic hypervisor" mode */
/*
* Canonicalize and copy the cpumasks.
*/
if (cache_cpumask_orig && cache_control) {
cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
cache_cpumask = &cache_cpumask_copy;
} else {
cpumask_clear(&cache_cpumask_copy);
cache_cpumask = NULL;
}
if (cache_cpumask == NULL)
cache_control = 0;
if (tlb_cpumask_orig && tlb_length) {
cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
tlb_cpumask = &tlb_cpumask_copy;
} else {
cpumask_clear(&tlb_cpumask_copy);
tlb_cpumask = NULL;
}
hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
asids, asidcount);
cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
rc = hv_flush_remote(cache_pa, cache_control,
cpumask_bits(cache_cpumask),
tlb_va, tlb_length, tlb_pgsize,
cpumask_bits(tlb_cpumask),
asids, asidcount);
if (rc == 0)
return;
pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
cache_pa, cache_control, cache_cpumask,
cpumask_pr_args(&cache_cpumask_copy),
(unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
panic("Unsafe to continue.");
}
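As an aside, the canonicalization the comment above describes reduces to two rules: a zero length (or control word) forces the matching cpumask to NULL, and a NULL cache cpumask forces cache_control to zero. A tiny standalone sketch of that invariant (hypothetical helper, not part of the tile sources):

#include <stdio.h>

/* Sketch: null the mask when its control word is zero, and zero the
 * control word when the mask is NULL, mirroring flush_remote(). */
static void canonicalize(const unsigned long **mask, unsigned long *control)
{
    if (!*control)
        *mask = NULL;
    if (!*mask)
        *control = 0;
}

int main(void)
{
    static const unsigned long words[1] = { 0xf };
    const unsigned long *mask = words;
    unsigned long control = 0;          /* nothing to flush */

    canonicalize(&mask, &control);
    printf("mask %s, control = %lu\n", mask ? "kept" : "NULL", control);
    return 0;
}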
Example 2: timer_list_show_tickdevices_header
static void timer_list_show_tickdevices_header(struct seq_file *m)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
print_tickdevice(m, tick_get_broadcast_device(), -1);
SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
cpumask_bits(tick_get_broadcast_mask())[0]);
#ifdef CONFIG_TICK_ONESHOT
SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
#endif
SEQ_printf(m, "\n");
#endif
}
Example 3: set_cpumask
int set_cpumask(envid_t veid, cpumask_t *mask)
{
static char maskstr[CPUMASK_NBITS * 2];
bitmap_snprintf(maskstr, CPUMASK_NBITS * 2,
cpumask_bits(mask), CPUMASK_NBITS);
logger(0, 0, "Setting CPU mask: %s", maskstr);
if (fairsched_cpumask(veid, sizeof(cpumask_t), cpumask_bits(mask))) {
logger(-1, errno, "fairsched_cpumask");
return VZ_SETFSHD_ERROR;
}
return 0;
}
Example 4: gicv2_ipi_send_mask
void gicv2_ipi_send_mask(int irq, const cpumask_t *dest)
{
u8 tlist = (u8)cpumask_bits(dest)[0];
assert(irq < 16);
writel(tlist << 16 | irq, gicv2_dist_base() + GICD_SGIR);
}
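The single register write encodes everything GICv2 needs for a software-generated interrupt: the CPU target list goes in bits [23:16] of GICD_SGIR and the SGI number in bits [3:0], which is also why taking word 0 of the cpumask is enough, since GICv2 addresses at most eight SGI targets. A standalone sketch of the encoding (the helper name is local; the field layout follows the GICv2 specification):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a GICD_SGIR value: CPUTargetList in [23:16], SGIINTID in [3:0]. */
static uint32_t sgir_encode(uint8_t target_list, unsigned int irq)
{
    assert(irq < 16);                   /* SGIs are interrupt IDs 0-15 */
    return (uint32_t)target_list << 16 | irq;
}

int main(void)
{
    /* SGI 5 aimed at CPUs 0 and 2 (target list 0b101). */
    printf("GICD_SGIR = %#x\n", sgir_encode(0x5, 5));   /* 0x50005 */
    return 0;
}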
Example 5: hotplug_rtb_callback
static int hotplug_rtb_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
/*
* Bits [19:4] of the data are the online mask, lower 4 bits are the
* cpu number that is being changed. Additionally, changes to the
* online_mask that will be done by the current hotplug will be made
* even though they aren't necessarily in the online mask yet.
*
* XXX: This design is limited to supporting at most 16 cpus
*/
unsigned long this_cpumask = CPUSET_OF(1 << (unsigned long)hcpu);
unsigned long cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
unsigned long cpudata = CPU_OF((unsigned long)hcpu) | cpumask;
switch (action & (~CPU_TASKS_FROZEN)) {
case CPU_STARTING:
uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
break;
case CPU_DYING:
cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
break;
default:
break;
}
return NOTIFY_OK;
}
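CPUSET_OF() and CPU_OF() are not shown on this page; going by the bit layout spelled out in the comment (online mask in bits [19:4], CPU number in the low 4 bits), plausible definitions would look like the following sketch. These are assumptions for illustration, not the actual MSM header macros:

#include <stdio.h>

/* Hypothetical macros matching the described layout: online mask in
 * bits [19:4], hotplugged CPU number in bits [3:0]. */
#define CPUSET_OF(mask) (((unsigned long)(mask) & 0xffffUL) << 4)
#define CPU_OF(cpu)     ((unsigned long)(cpu) & 0xfUL)

int main(void)
{
    unsigned long online = 0xb;         /* CPUs 0, 1 and 3 online */
    unsigned long cpudata = CPU_OF(3) | CPUSET_OF(online);

    printf("logged word = %#lx\n", cpudata);    /* 0xb3 */
    return 0;
}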
Example 6: set_max_cpus
static int set_max_cpus(const char *buf, const struct kernel_param *kp)
{
unsigned int i, ntokens = 0;
const char *cp = buf;
int val;
if (!clusters_inited)
return -EINVAL;
while ((cp = strpbrk(cp + 1, ":")))
ntokens++;
if (!ntokens)
return -EINVAL;
cp = buf;
for (i = 0; i < num_clusters; i++) {
if (sscanf(cp, "%d\n", &val) != 1)
return -EINVAL;
if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
return -EINVAL;
managed_clusters[i]->max_cpu_request = val;
cp = strchr(cp, ':');
cp++;
trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
val);
}
schedule_delayed_work(&evaluate_hotplug_work, 0);
return 0;
}
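The parse loop walks a colon-separated string such as "4:2", one max-CPU value per cluster, using strpbrk() to count separators up front and strchr() to advance. A userspace sketch of the same tokenizing pattern (input and cluster count are invented; unlike the original, it also checks strchr() for NULL before advancing):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *buf = "4:2";            /* one value per cluster */
    const char *cp = buf;
    int val, ntokens = 0;

    /* Count ':' separators, as set_max_cpus() does with strpbrk(). */
    while ((cp = strpbrk(cp + 1, ":")))
        ntokens++;
    if (!ntokens)
        return 1;

    cp = buf;
    for (int cluster = 0; cluster <= ntokens; cluster++) {
        if (sscanf(cp, "%d", &val) != 1)
            return 1;
        printf("cluster %d: at most %d CPUs\n", cluster, val);
        cp = strchr(cp, ':');
        if (!cp)
            break;
        cp++;
    }
    return 0;
}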
Example 7: compat_sys_sched_getaffinity
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
compat_ulong_t __user *user_mask_ptr)
{
int ret;
cpumask_var_t mask;
if ((len * BITS_PER_BYTE) < nr_cpu_ids)
return -EINVAL;
if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
size_t retlen = min_t(size_t, len, cpumask_size());
if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
ret = -EFAULT;
else
ret = retlen;
}
free_cpumask_var(mask);
return ret;
}
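The only cpumask_bits() use here feeds compat_put_bitmap(), which has to re-emit the native unsigned-long bitmap as 32-bit compat words. For a 64-bit kernel the split amounts to writing the low half of each word before the high half, as in this simplified sketch (access checking and partial-word handling omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned long native = 0x123456789abcdef0UL;   /* one 64-bit bitmap word */
    uint32_t compat[2];

    compat[0] = (uint32_t)native;           /* bits 0-31 first */
    compat[1] = (uint32_t)(native >> 32);   /* then bits 32-63 */
    printf("compat words: %#x %#x\n", compat[0], compat[1]);
    return 0;
}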
Example 8: cpumask_to_xenctl_cpumap
int cpumask_to_xenctl_cpumap(
struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
{
unsigned int guest_bytes, copy_bytes, i;
uint8_t zero = 0;
int err = 0;
uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
if ( !bytemap )
return -ENOMEM;
guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
copy_bytes = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);
if ( copy_bytes != 0 )
if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
err = -EFAULT;
for ( i = copy_bytes; !err && i < guest_bytes; i++ )
if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
err = -EFAULT;
xfree(bytemap);
return err;
}
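bitmap_long_to_byte() serializes the unsigned-long bitmap into a plain byte array so the data handed to the guest does not depend on the host word size. A simplified userspace version of that conversion (not Xen's implementation; the output buffer must be pre-zeroed):

#include <stdint.h>
#include <stdio.h>

#define LONG_BITS (8 * sizeof(unsigned long))

/* Serialize nbits of a long-word bitmap into bytes, lowest bit first. */
static void long_to_byte(uint8_t *out, const unsigned long *bits,
                         unsigned int nbits)
{
    for (unsigned int i = 0; i < nbits; i++)
        if (bits[i / LONG_BITS] & (1UL << (i % LONG_BITS)))
            out[i / 8] |= 1u << (i % 8);
}

int main(void)
{
    unsigned long bits[1] = { 0x103 };  /* CPUs 0, 1 and 8 set */
    uint8_t bytes[2] = { 0 };

    long_to_byte(bytes, bits, 9);
    printf("bytes: %#x %#x\n", bytes[0], bytes[1]);   /* 0x3 0x1 */
    return 0;
}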
Example 9: xenctl_cpumap_to_cpumask
int xenctl_cpumap_to_cpumask(
cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
{
unsigned int guest_bytes, copy_bytes;
int err = 0;
uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
if ( !bytemap )
return -ENOMEM;
guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
copy_bytes = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
if ( copy_bytes != 0 )
{
if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
err = -EFAULT;
if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes == copy_bytes) )
bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
}
if ( err )
/* nothing */;
else if ( alloc_cpumask_var(cpumask) )
bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
else
err = -ENOMEM;
xfree(bytemap);
return err;
}
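The masking line `bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7))` clears any stray bits the guest may have set beyond nr_cpus in the final partial byte, so they never reach the host cpumask. A quick demonstration of the trick:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int nr_cpus = 5;           /* only 5 bits are valid */
    uint8_t last_byte = 0xff;           /* guest set all 8 bits */

    if (nr_cpus & 7)                    /* partial byte? */
        last_byte &= ~(0xff << (nr_cpus & 7));
    printf("masked byte = %#x\n", last_byte);   /* 0x1f */
    return 0;
}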
Example 10: idu_irq_set_affinity
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)
{
unsigned long flags;
cpumask_t online;
unsigned int destination_bits;
unsigned int distribution_mode;
/* errout if no online cpu per @cpumask */
if (!cpumask_and(&online, cpumask, cpu_online_mask))
return -EINVAL;
raw_spin_lock_irqsave(&mcip_lock, flags);
destination_bits = cpumask_bits(&online)[0];
idu_set_dest(data->hwirq, destination_bits);
if (ffs(destination_bits) == fls(destination_bits))
distribution_mode = IDU_M_DISTRI_DEST;
else
distribution_mode = IDU_M_DISTRI_RR;
idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
return IRQ_SET_MASK_OK;
}
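The ffs() == fls() comparison is a compact single-bit test: the lowest and highest set bit positions coincide exactly when one destination CPU is selected, so the IDU can use directed delivery instead of round-robin. A userspace check of the property (glibc provides ffs(); fls() is reimplemented here since it is kernel-only):

#include <stdio.h>
#include <strings.h>                    /* ffs() */

/* Portable fls(): 1-based index of the highest set bit, 0 if none. */
static int my_fls(unsigned int x)
{
    int i = 0;

    while (x) {
        i++;
        x >>= 1;
    }
    return i;
}

int main(void)
{
    unsigned int masks[] = { 0x1, 0x6, 0x8, 0x9 };

    for (int i = 0; i < 4; i++)
        printf("%#x -> %s\n", masks[i],
               ffs((int)masks[i]) == my_fls(masks[i]) ?
               "one CPU (directed)" : "several CPUs (round-robin)");
    return 0;
}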
Example 11: hotplug_rtb_callback
static int hotplug_rtb_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
/*
* Bits [19:4] of the data are the online mask, lower 4 bits are the
* cpu number that is being changed. Additionally, changes to the
* online_mask that will be done by the current hotplug will be made
* even though they aren't necessarily in the online mask yet.
*
* XXX: This design is limited to supporting at most 16 cpus
*/
unsigned long this_cpumask = CPUSET_OF(1 << (unsigned long)hcpu);
unsigned long cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
unsigned long cpudata = CPU_OF((unsigned long)hcpu) | cpumask;
switch (action & (~CPU_TASKS_FROZEN)) {
case CPU_STARTING:
uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
break;
case CPU_DYING:
uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
break;
default:
break;
}
return NOTIFY_OK;
}
Example 12: flush_tlb_others
/*==========================================================================*
* Name: flush_tlb_others
*
* Description: This routine asks the other CPUs to flush their TLBs.
* 1. Set up the parameters.
* 2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting them to
* execute 'smp_invalidate_interrupt()'.
* 3. Wait until the other CPUs have finished.
*
* Born on Date: 2002.02.05
*
* Arguments: cpumask - bitmap of target CPUs
* *mm - a pointer to the mm struct for the TLB flush
* *vma - a pointer to the vma struct that includes va
* va - virtual address for the TLB flush
*
* Returns: void (cannot fail)
*
* Modification log:
* Date Who Description
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long va)
{
unsigned long *mask;
#ifdef DEBUG_SMP
unsigned long flags;
__save_flags(flags);
if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
BUG();
#endif /* DEBUG_SMP */
/*
* A couple of (to be removed) sanity checks:
*
* - we do not send IPIs to not-yet booted CPUs.
* - current CPU must not be in mask
* - mask must exist :)
*/
BUG_ON(cpumask_empty(&cpumask));
BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
BUG_ON(!mm);
/* If a CPU which we ran on has gone down, OK. */
cpumask_and(&cpumask, &cpumask, cpu_online_mask);
if (cpumask_empty(&cpumask))
return;
/*
* i'm not happy about this global shared spinlock in the
* MM hot path, but we'll see how contended it is.
* Temporarily this turns IRQs off, so that lockups are
* detected by the NMI watchdog.
*/
spin_lock(&tlbstate_lock);
flush_mm = mm;
flush_vma = vma;
flush_va = va;
mask = cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
/*
* We have to send the IPI only to
* CPUs affected.
*/
send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
while (!cpumask_empty((cpumask_t *)&flush_cpumask)) {
/* nothing. lockup detection does not belong here */
mb();
}
flush_mm = NULL;
flush_vma = NULL;
flush_va = 0;
spin_unlock(&tlbstate_lock);
}
Example 13: store_rps_map
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
const char *buf, size_t len)
{
struct rps_map *old_map, *map;
cpumask_var_t mask;
int err, cpu, i;
static DEFINE_MUTEX(rps_map_mutex);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
if (err) {
free_cpumask_var(mask);
return err;
}
map = kzalloc(max_t(unsigned int,
RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
GFP_KERNEL);
if (!map) {
free_cpumask_var(mask);
return -ENOMEM;
}
i = 0;
for_each_cpu_and(cpu, mask, cpu_online_mask)
map->cpus[i++] = cpu;
if (i) {
map->len = i;
} else {
kfree(map);
map = NULL;
}
mutex_lock(&rps_map_mutex);
old_map = rcu_dereference_protected(queue->rps_map,
mutex_is_locked(&rps_map_mutex));
rcu_assign_pointer(queue->rps_map, map);
if (map)
static_key_slow_inc(&rps_needed);
if (old_map)
static_key_slow_dec(&rps_needed);
mutex_unlock(&rps_map_mutex);
if (old_map)
kfree_rcu(old_map, rcu);
free_cpumask_var(mask);
return len;
}
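Here cpumask_bits() lets bitmap_parse() fill the cpumask straight from the hex string written to the sysfs file. A rough userspace stand-in for a single-word mask (the kernel helper additionally validates length and handles comma-grouped words):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *buf = "f";              /* sysfs-style hex mask: CPUs 0-3 */
    unsigned long bits = strtoul(buf, NULL, 16);

    for (int cpu = 0; cpu < 8; cpu++)
        if (bits & (1UL << cpu))
            printf("cpu %d in the RPS map\n", cpu);
    return 0;
}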
Example 14: es7000_setup_apic_routing
static void es7000_setup_apic_routing(void)
{
int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
pr_info("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster",
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
}
Example 15: xlp_span_multiple_nodes
/* Checks if a mask spans multiple nodes
*
* @mask : cpumask to check for multiple node span
*/
int xlp_span_multiple_nodes(const struct cpumask *mask)
{
int l, f;
f = cpumask_first(mask);
l = find_last_bit(cpumask_bits(mask), NR_CPUS);
if ((f/NLM_MAX_CPU_PER_NODE) != (l/NLM_MAX_CPU_PER_NODE)) {
printk(KERN_DEBUG "Mask spans from cpu %#x to %#x. Spans across nodes are not supported\n", f, l);
return -EINVAL;
}
return 0;
}
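The span check divides the first and last set CPU numbers by the per-node CPU count; the mask stays on one node only when both quotients match. A standalone sketch with an assumed 32 CPUs per node (NLM_MAX_CPU_PER_NODE's real value depends on the XLP hardware):

#include <stdio.h>

#define CPUS_PER_NODE 32    /* assumption standing in for NLM_MAX_CPU_PER_NODE */

static int spans_nodes(int first_cpu, int last_cpu)
{
    return first_cpu / CPUS_PER_NODE != last_cpu / CPUS_PER_NODE;
}

int main(void)
{
    printf("cpus 3..30: %s\n", spans_nodes(3, 30) ? "spans" : "one node");
    printf("cpus 3..40: %s\n", spans_nodes(3, 40) ? "spans" : "one node");
    return 0;
}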