This article collects typical usage examples of the C++ idr_init function. If you have been wondering what idr_init does in practice and how to call it, the hand-picked code examples below should help.
A total of 15 idr_init code examples are shown, sorted by popularity by default.
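Before the examples, here is a minimal sketch of the Linux-kernel idr lifecycle that most of the snippets below follow: idr_init() prepares an ID allocator, idr_alloc() hands out integer IDs mapped to caller pointers, idr_remove() releases individual IDs, and idr_destroy() frees the allocator's internal memory. The struct my_ctx type, the my_-prefixed names, and the locking scheme are illustrative assumptions for this sketch, not code taken from any of the examples. Note also that idr_alloc() is the post-3.9 kernel interface; several of the snippets below predate it.

    #include <linux/idr.h>
    #include <linux/module.h>
    #include <linux/spinlock.h>

    /* Hypothetical object tracked by an integer ID. */
    struct my_ctx {
        int id;
    };

    static DEFINE_SPINLOCK(my_idr_lock);
    static struct idr my_idr;

    static int __init my_module_init(void)
    {
        idr_init(&my_idr);      /* prepare the allocator; cannot fail */
        return 0;
    }

    static int my_ctx_register(struct my_ctx *ctx)
    {
        int id;

        spin_lock(&my_idr_lock);
        /* map the lowest free ID (no upper bound) to ctx */
        id = idr_alloc(&my_idr, ctx, 0, 0, GFP_ATOMIC);
        spin_unlock(&my_idr_lock);
        if (id < 0)
            return id;          /* -ENOMEM or -ENOSPC */

        ctx->id = id;
        return 0;
    }

    static void my_ctx_unregister(struct my_ctx *ctx)
    {
        spin_lock(&my_idr_lock);
        idr_remove(&my_idr, ctx->id);
        spin_unlock(&my_idr_lock);
    }

    static void __exit my_module_exit(void)
    {
        idr_destroy(&my_idr);   /* free the allocator's internal nodes */
    }

    module_init(my_module_init);
    module_exit(my_module_exit);
    MODULE_LICENSE("GPL");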
Example 1: rnic_init
static void
rnic_init(struct iwch_dev *rnicp)
{
    /* ID allocators for CQ, QP and memory-region handles. */
    idr_init(&rnicp->cqidr);
    idr_init(&rnicp->qpidr);
    idr_init(&rnicp->mmidr);
    mtx_init(&rnicp->lock, "iwch rnic lock", NULL, MTX_DEF|MTX_DUPOK);

    /* Static RNIC attributes advertised to consumers. */
    rnicp->attr.vendor_id = 0x168;
    rnicp->attr.vendor_part_id = 7;
    rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
    rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
    rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
    rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
    rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
    rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
    rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
    rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
    rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
    rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
    rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
    rnicp->attr.can_resize_wq = 0;
    rnicp->attr.max_rdma_reads_per_qp = 8;
    rnicp->attr.max_rdma_read_resources =
        rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
    rnicp->attr.max_rdma_read_qp_depth = 8;  /* IRD */
    rnicp->attr.max_rdma_read_depth =
        rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
    rnicp->attr.rq_overflow_handled = 0;
    rnicp->attr.can_modify_ird = 0;
    rnicp->attr.can_modify_ord = 0;
    rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
    rnicp->attr.stag0_value = 1;
    rnicp->attr.zbva_support = 1;
    rnicp->attr.local_invalidate_fence = 1;
    rnicp->attr.cq_overflow_detection = 1;
    return;
}
Example 2: drm_gem_init
int
drm_gem_init(struct drm_device *dev)
{
    spin_lock_init(&dev->object_name_lock);
    idr_init(&dev->object_name_idr);
    atomic_set(&dev->object_count, 0);
    atomic_set(&dev->object_memory, 0);
    atomic_set(&dev->pin_count, 0);
    atomic_set(&dev->pin_memory, 0);
    atomic_set(&dev->gtt_count, 0);
    atomic_set(&dev->gtt_memory, 0);
    return 0;
}
Example 3: kmalloc
struct p9_idpool *p9_idpool_create(void)
{
    struct p9_idpool *p;

    p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL);
    if (!p)
        return ERR_PTR(-ENOMEM);

    spin_lock_init(&p->lock);
    idr_init(&p->pool);

    return p;
}
Example 4: ipc_init_ids
/**
* ipc_init_ids - initialise ipc identifiers
* @ids: ipc identifier set
*
* Set up the sequence range to use for the ipc identifier range (limited
* below IPCMNI) then initialise the keys hashtable and ids idr.
*/
int ipc_init_ids(struct ipc_ids *ids)
{
    int err;

    ids->in_use = 0;
    ids->seq = 0;
    ids->next_id = -1;
    init_rwsem(&ids->rwsem);
    err = rhashtable_init(&ids->key_ht, &ipc_kht_params);
    if (err)
        return err;
    idr_init(&ids->ipcs_idr);
    ids->tables_initialized = true;
    return 0;
}
Example 5: ath10k_htt_tx_alloc
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    int ret, size;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
           htt->max_num_pending_tx);

    spin_lock_init(&htt->tx_lock);
    idr_init(&htt->pending_tx);

    size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
    htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
                          &htt->txbuf.paddr,
                          GFP_KERNEL);
    if (!htt->txbuf.vaddr) {
        ath10k_err(ar, "failed to alloc tx buffer\n");
        ret = -ENOMEM;
        goto free_idr_pending_tx;
    }

    ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
        goto free_txbuf;
    }

    ret = ath10k_htt_tx_alloc_txq(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc txq: %d\n", ret);
        goto free_frag_desc;
    }

    return 0;

free_frag_desc:
    ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
    size = htt->max_num_pending_tx *
           sizeof(struct ath10k_htt_txbuf);
    dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
              htt->txbuf.paddr);

free_idr_pending_tx:
    idr_destroy(&htt->pending_tx);

    return ret;
}
Example 6: smbsrv_init_sessions
/*
* init the sessions structures
*/
NTSTATUS smbsrv_init_sessions(struct smbsrv_connection *smb_conn, uint64_t limit)
{
    /*
     * the idr_* functions take 'int' as limit,
     * and only work with a max limit 0x00FFFFFF
     */
    limit &= 0x00FFFFFF;

    smb_conn->sessions.idtree_vuid = idr_init(smb_conn);
    NT_STATUS_HAVE_NO_MEMORY(smb_conn->sessions.idtree_vuid);
    smb_conn->sessions.idtree_limit = limit;
    smb_conn->sessions.list = NULL;

    return NT_STATUS_OK;
}
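Note that, unlike the other examples on this page, this snippet comes from Samba rather than the Linux kernel: Samba's idtree-based idr_init() takes a talloc memory context and returns a newly allocated struct idr_context * (NULL on failure, hence the NT_STATUS_HAVE_NO_MEMORY check), whereas the kernel's idr_init() initializes a caller-provided struct idr and returns nothing.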
Example 7: sis_driver_load
static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
    drm_sis_private_t *dev_priv;

    pci_set_master(dev->pdev);

    dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
    if (dev_priv == NULL)
        return -ENOMEM;

    idr_init(&dev_priv->object_idr);
    dev->dev_private = (void *)dev_priv;
    dev_priv->chipset = chipset;

    return 0;
}
Example 8: ipc_init_ids
void ipc_init_ids(struct ipc_ids *ids)
{
    init_rwsem(&ids->rw_mutex);

    ids->in_use = 0;
    ids->seq = 0;
    {
        int seq_limit = INT_MAX/SEQ_MULTIPLIER;
        if (seq_limit > USHRT_MAX)
            ids->seq_max = USHRT_MAX;
        else
            ids->seq_max = seq_limit;
    }

    idr_init(&ids->ipcs_idr);
}
Example 9: ath10k_htt_tx_alloc
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    int ret, size;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
           htt->max_num_pending_tx);

    mtx_init(&htt->tx_lock, device_get_nameunit(ar->sc_dev),
        "athp htt tx", MTX_DEF);
    mtx_init(&htt->tx_comp_lock, device_get_nameunit(ar->sc_dev),
        "athp htt comp tx", MTX_DEF);

    idr_init(&htt->pending_tx);

    htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->sc_dev,
                       sizeof(struct ath10k_htt_txbuf), 4, 0);
    if (!htt->tx_pool) {
        ret = -ENOMEM;
        goto free_idr_pending_tx;
    }

    if (!ar->hw_params.continuous_frag_desc)
        goto skip_frag_desc_alloc;

    size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
    if (athp_descdma_alloc(ar, &htt->frag_desc.dd, "htt frag_desc",
        8, size) != 0) {
        ath10k_warn(ar, "failed to alloc fragment desc memory\n");
        ret = -ENOMEM;
        goto free_tx_pool;
    }
    htt->frag_desc.vaddr = (void *) htt->frag_desc.dd.dd_desc;
    htt->frag_desc.paddr = htt->frag_desc.dd.dd_desc_paddr;

skip_frag_desc_alloc:
    return 0;

free_tx_pool:
    dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
    mtx_destroy(&htt->tx_lock);
    idr_destroy(&htt->pending_tx);
    return ret;
}
Example 10: ath10k_htt_tx_alloc
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
           htt->max_num_pending_tx);

    spin_lock_init(&htt->tx_lock);
    idr_init(&htt->pending_tx);

    htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                       sizeof(struct ath10k_htt_txbuf), 4, 0);
    if (!htt->tx_pool) {
        idr_destroy(&htt->pending_tx);
        return -ENOMEM;
    }

    return 0;
}
Example 11: intel_svm_alloc_pasid_tables
int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
    struct page *pages;
    int order;

    /* Start at 2 because it's defined as 2^(1+PSS) */
    iommu->pasid_max = 2 << ecap_pss(iommu->ecap);

    /* Eventually I'm promised we will get a multi-level PASID table
     * and it won't have to be physically contiguous. Until then,
     * limit the size because 8MiB contiguous allocations can be hard
     * to come by. The limit of 0x20000, which is 1MiB for each of
     * the PASID and PASID-state tables, is somewhat arbitrary. */
    if (iommu->pasid_max > 0x20000)
        iommu->pasid_max = 0x20000;

    order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
    pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
    if (!pages) {
        pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
            iommu->name);
        return -ENOMEM;
    }
    iommu->pasid_table = page_address(pages);
    pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

    if (ecap_dis(iommu->ecap)) {
        /* Just making it explicit... */
        BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (pages)
            iommu->pasid_state_table = page_address(pages);
        else
            pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
                iommu->name);
    }

    idr_init(&iommu->pasid_idr);

    return 0;
}
Example 12: idr_init
static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
    struct cxl_afu *afu;

    if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
        return NULL;

    afu->adapter = adapter;
    afu->dev.parent = &adapter->dev;
    afu->dev.release = cxl_release_afu;
    afu->slice = slice;
    idr_init(&afu->contexts_idr);  /* per-AFU context ID allocator */
    mutex_init(&afu->contexts_lock);
    spin_lock_init(&afu->afu_cntl_lock);
    mutex_init(&afu->spa_mutex);

    afu->prefault_mode = CXL_PREFAULT_NONE;
    afu->irqs_max = afu->adapter->user_irqs;

    return afu;
}
Example 13: drm_gem_init
int
drm_gem_init(struct drm_device *dev)
{
    struct drm_gem_mm *mm;

    mutex_init(&dev->object_name_lock);
    idr_init(&dev->object_name_idr);

    mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
    if (!mm) {
        DRM_ERROR("out of memory\n");
        return -ENOMEM;
    }

    dev->mm_private = mm;

    drm_vma_offset_manager_init(&mm->vma_manager,
                    DRM_FILE_PAGE_OFFSET_START,
                    DRM_FILE_PAGE_OFFSET_SIZE);

    return 0;
}
Example 14: ath10k_htt_tx_alloc
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    int ret, size;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
           htt->max_num_pending_tx);

    spin_lock_init(&htt->tx_lock);
    idr_init(&htt->pending_tx);

    htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                       sizeof(struct ath10k_htt_txbuf), 4, 0);
    if (!htt->tx_pool) {
        ret = -ENOMEM;
        goto free_idr_pending_tx;
    }

    if (!ar->hw_params.continuous_frag_desc)
        goto skip_frag_desc_alloc;

    size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
    htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
                          &htt->frag_desc.paddr,
                          GFP_DMA);
    if (!htt->frag_desc.vaddr) {
        ath10k_warn(ar, "failed to alloc fragment desc memory\n");
        ret = -ENOMEM;
        goto free_tx_pool;
    }

skip_frag_desc_alloc:
    return 0;

free_tx_pool:
    dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
    idr_destroy(&htt->pending_tx);
    return ret;
}
Example 15: ib_ucm_init
static int __init ib_ucm_init(void)
{
    int result;

    result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
    if (result) {
        ucm_dbg("Error <%d> registering dev\n", result);
        goto err_chr;
    }

    cdev_init(&ib_ucm_cdev, &ib_ucm_fops);

    result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
    if (result) {
        ucm_dbg("Error <%d> adding cdev\n", result);
        goto err_cdev;
    }

    ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
    if (IS_ERR(ib_ucm_class)) {
        result = PTR_ERR(ib_ucm_class);
        ucm_dbg("Error <%d> creating class\n", result);
        goto err_class;
    }

    class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");

    idr_init(&ctx_id_table);
    init_MUTEX(&ctx_id_mutex);

    return 0;

err_class:
    cdev_del(&ib_ucm_cdev);
err_cdev:
    unregister_chrdev_region(IB_UCM_DEV, 1);
err_chr:
    return result;
}