This article collects typical usage examples of BITS_TO_LONGS in C/C++. If you have been wondering exactly what BITS_TO_LONGS does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. Strictly speaking, BITS_TO_LONGS is a macro rather than a function: it converts a count of bits into the number of unsigned longs needed to hold a bitmap of that size.
Fifteen code examples of BITS_TO_LONGS are shown below, sorted by popularity by default. All of them are drawn from the Linux kernel and closely related open-source projects.
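Before the examples, here is a minimal, self-contained sketch of what the macro computes and of the allocation idiom that nearly every example below relies on. The macro definition is an equivalent reconstruction rather than a copy of any particular kernel version (recent kernels spell it DIV_ROUND_UP(nr, BITS_PER_TYPE(long))), and the userspace calloc stands in for the kernel's kzalloc/kcalloc:
#include <limits.h>   /* CHAR_BIT */
#include <stdio.h>
#include <stdlib.h>
/* Round a bit count up to the number of longs needed to hold it. */
#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
int main(void)
{
unsigned int nbits = 100;
/* The idiom used throughout the examples below: size a zeroed
* bitmap in units of long, never in raw bytes. */
unsigned long *bitmap = calloc(BITS_TO_LONGS(nbits), sizeof(long));
if (!bitmap)
return 1;
printf("%u bits need %zu longs\n", nbits, BITS_TO_LONGS(nbits));
free(bitmap);
return 0;
}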
Example 1: mlx4_bitmap_init
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
/* num must be a power of 2 */
if (num != roundup_pow_of_two(num))
return -EINVAL;
bitmap->last = 0;
bitmap->top = 0;
bitmap->max = num - reserved_top;
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
spin_lock_init(&bitmap->lock);
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
sizeof (long), GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
bitmap_set(bitmap->table, 0, reserved_bot);
return 0;
}
Example 2: BITS_TO_LONGS
static __init struct cma *cma_create_area(unsigned long base_pfn,
unsigned long carved_out_count,
unsigned long count)
{
int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
struct cma *cma;
int ret = -ENOMEM;
pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
cma = kzalloc(sizeof *cma, GFP_KERNEL);
if (!cma)
return ERR_PTR(-ENOMEM);
cma->base_pfn = base_pfn;
cma->count = count;
cma->free_count = count;
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
#ifdef CMA_NO_MIGRATION
cma->isolated = true;
#endif
if (!cma->bitmap)
goto no_mem;
ret = cma_activate_area(base_pfn, carved_out_count);
if (ret)
goto error;
pr_debug("%s: returned %p\n", __func__, (void *)cma);
return cma;
error:
kfree(cma->bitmap);
no_mem:
kfree(cma);
return ERR_PTR(ret);
}
Example 3: cma_activate_area
static int __init cma_activate_area(struct cma *cma)
{
int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
unsigned i = cma->count >> pageblock_order;
struct zone *zone;
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!cma->bitmap)
return -ENOMEM;
WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
do {
unsigned j;
base_pfn = pfn;
for (j = pageblock_nr_pages; j; --j, pfn++) {
WARN_ON_ONCE(!pfn_valid(pfn));
/*
* alloc_contig_range requires the pfn range
* specified to be in the same zone. Make this
* simple by forcing the entire CMA resv range
* to be in the same zone.
*/
if (page_zone(pfn_to_page(pfn)) != zone)
goto err;
}
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
} while (--i);
return 0;
err:
kfree(cma->bitmap);
return -EINVAL;
}
Example 4: bits_to_user
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
unsigned int maxlen, void __user *p, int compat)
{
int len, i;
if (compat) {
len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
if (len > maxlen)
len = maxlen;
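/*
* Added note: in the kernel source this is the big-endian compat
* path. A 64-bit long holds two compat_longs whose 32-bit halves a
* 32-bit reader expects in the opposite order, so the index
* i + 1 - ((i % 2) << 1) swaps each adjacent pair (0<->1, 2<->3, ...)
* while copying out.
*/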
for (i = 0; i < len / sizeof(compat_long_t); i++)
if (copy_to_user((compat_long_t __user *) p + i,
(compat_long_t *) bits +
i + 1 - ((i % 2) << 1),
sizeof(compat_long_t)))
return -EFAULT;
} else {
len = BITS_TO_LONGS(maxbit) * sizeof(long);
if (len > maxlen)
len = maxlen;
if (copy_to_user(p, bits, len))
return -EFAULT;
}
return len;
}
Example 5: i915_gem_object_save_bit_17_swizzle
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
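/*
* Background (added note): on i915 hardware that swizzles addresses
* using physical address bit 17, page contents must be fixed up if a
* page returns from swap at an address whose bit 17 changed. This
* function records the current bit-17 state of each page so that
* change can be detected later.
*/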
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
for (i = 0; i < page_count; i++) {
if (page_to_phys(obj->pages[i]) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
}
}
Example 6: ath10k_htt_tx_alloc
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
else
htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
htt->max_num_pending_tx, GFP_KERNEL);
if (!htt->pending_tx)
return -ENOMEM;
htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(htt->max_num_pending_tx),
GFP_KERNEL);
if (!htt->used_msdu_ids) {
kfree(htt->pending_tx);
return -ENOMEM;
}
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
kfree(htt->used_msdu_ids);
kfree(htt->pending_tx);
return -ENOMEM;
}
return 0;
}
Example 7: intel_setup_irq_remapping
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
if (!iommu->ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
INTR_REMAP_PAGE_ORDER);
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
kfree(ir_table);
return -ENOMEM;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
return 0;
}
Example 8: hclge_set_vf_mc_mta_status
static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
u8 *msg, u8 idx, bool is_end)
{
#define HCLGE_MTA_STATUS_MSG_SIZE 13
#define HCLGE_MTA_STATUS_MSG_BITS \
(HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGE_MTA_STATUS_MSG_END_BITS \
(HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
u16 tbl_cnt;
u16 tbl_idx;
u8 msg_ofs;
u8 msg_bit;
tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
HCLGE_MTA_STATUS_MSG_BITS;
/* set msg field */
msg_ofs = 0;
msg_bit = 0;
memset(status, 0, sizeof(status));
for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
if (msg[msg_ofs] & BIT(msg_bit))
set_bit(tbl_idx, status);
msg_bit++;
if (msg_bit == BITS_PER_BYTE) {
msg_bit = 0;
msg_ofs++;
}
}
return hclge_update_mta_status_common(vport,
status, idx * HCLGE_MTA_STATUS_MSG_BITS,
tbl_cnt, is_end);
}
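Example 8 shows the other common use of BITS_TO_LONGS: sizing an on-stack bitmap array instead of a heap allocation. This is exactly what the kernel's DECLARE_BITMAP(name, bits) helper expands to, so the status array above could equivalently be declared as DECLARE_BITMAP(status, HCLGE_MTA_STATUS_MSG_BITS).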
Example 9: mlxsw_sp2_kvdl_part_init
static struct mlxsw_sp2_kvdl_part *
mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp2_kvdl_part_info *info)
{
unsigned int indexes_per_usage_bit;
struct mlxsw_sp2_kvdl_part *part;
unsigned int index_range;
unsigned int usage_bit_count;
size_t usage_size;
if (!mlxsw_core_res_valid(mlxsw_sp->core,
info->usage_bit_count_res_id) ||
!mlxsw_core_res_valid(mlxsw_sp->core,
info->index_range_res_id))
return ERR_PTR(-EIO);
usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
info->usage_bit_count_res_id);
index_range = mlxsw_core_res_get(mlxsw_sp->core,
info->index_range_res_id);
/* For some partitions, one usage bit represents a group of indexes.
* That's why we compute the number of indexes per usage bit here,
* according to queried resources.
*/
indexes_per_usage_bit = index_range / usage_bit_count;
usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
part->info = info;
part->usage_bit_count = usage_bit_count;
part->indexes_per_usage_bit = indexes_per_usage_bit;
part->last_allocated_bit = usage_bit_count - 1;
return part;
}
Example 10: mlx5_mpfs_init
int mlx5_mpfs_init(struct mlx5_core_dev *dev)
{
int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
struct mlx5_mpfs *mpfs;
if (!MLX5_VPORT_MANAGER(dev))
return 0;
mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
if (!mpfs)
return -ENOMEM;
mutex_init(&mpfs->lock);
mpfs->size = l2table_size;
mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size),
sizeof(uintptr_t), GFP_KERNEL);
if (!mpfs->bitmap) {
kfree(mpfs);
return -ENOMEM;
}
dev->priv.mpfs = mpfs;
return 0;
}
Example 11: hns_roce_bitmap_init
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
u32 i;
if (num != roundup_pow_of_two(num))
return -EINVAL;
bitmap->last = 0;
bitmap->top = 0;
bitmap->max = num - reserved_top;
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
spin_lock_init(&bitmap->lock);
bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
for (i = 0; i < reserved_bot; ++i)
set_bit(i, bitmap->table);
return 0;
}
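Note the contrast with Example 1, which performs the same job: here the table is allocated with kcalloc(BITS_TO_LONGS(max), sizeof(long), ...), whose two-argument form checks the size multiplication for overflow, and the reserved bottom entries are marked with a set_bit() loop rather than bitmap_set().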
Example 12: iio_scan_mask_set
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
struct iio_dev *indio_dev = buffer->indio_dev;
unsigned long *mask;
unsigned long *trialmask;
trialmask = kmalloc(sizeof(*trialmask)*
BITS_TO_LONGS(indio_dev->masklength),
GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
WARN_ON("trying to set scanmask prior to registering buffer\n");
kfree(trialmask);
return -EINVAL;
}
bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
set_bit(bit, trialmask);
if (indio_dev->available_scan_masks) {
mask = iio_scan_mask_match(indio_dev->available_scan_masks,
indio_dev->masklength,
trialmask);
if (!mask) {
kfree(trialmask);
return -EINVAL;
}
}
bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
buffer->scan_count++;
kfree(trialmask);
return 0;
}
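Allocating BITS_TO_LONGS(nbits) longs with kmalloc and operating on them with bitmap_copy()/set_bit() is the classic way to build a scratch bitmap; newer kernels wrap the same pattern in bitmap_alloc(nbits, GFP_KERNEL) and bitmap_free().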
Example 13: msi_bitmap_alloc
int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
struct device_node *of_node)
{
int size;
if (!irq_count)
return -EINVAL;
size = BITS_TO_LONGS(irq_count) * sizeof(long);
pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size);
bmp->bitmap = zalloc_maybe_bootmem(size, GFP_KERNEL);
if (!bmp->bitmap) {
pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n");
return -ENOMEM;
}
/* We zalloc'ed the bitmap, so all irqs are free by default */
spin_lock_init(&bmp->lock);
bmp->of_node = of_node_get(of_node);
bmp->irq_count = irq_count;
return 0;
}
Example 14: i915_gem_object_save_bit_17_swizzle
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), DRM_I915_GEM, M_WAITOK);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
/* XXXKIB: review locking, atomics might be not needed there */
for (i = 0; i < page_count; i++) {
if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17))
set_bit(i, obj->bit_17);
else
clear_bit(i, obj->bit_17);
}
}
Example 15: LIST_HEAD
/*
* EV_ABS events which should not be cached are listed here.
*/
static unsigned int input_abs_bypass_init_data[] __initdata = {
ABS_MT_TOUCH_MAJOR,
ABS_MT_TOUCH_MINOR,
ABS_MT_WIDTH_MAJOR,
ABS_MT_WIDTH_MINOR,
ABS_MT_ORIENTATION,
ABS_MT_POSITION_X,
ABS_MT_POSITION_Y,
ABS_MT_TOOL_TYPE,
ABS_MT_BLOB_ID,
ABS_MT_PRESSURE,
0
};
static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);
/*
* input_mutex protects access to both input_dev_list and input_handler_list.
* This also causes input_[un]register_device and input_[un]register_handler
* to be mutually exclusive, which simplifies locking in drivers that
* implement input handlers.
*/
static DEFINE_MUTEX(input_mutex);
static struct input_handler *input_table[8];
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{
return code <= max && test_bit(code, bm);
}