This page collects typical usage examples of the pci_dma_mapping_error function, as used in C code across the Linux kernel. If you have been wondering what exactly pci_dma_mapping_error does, how to call it, or what real-world uses look like, the hand-picked examples here should help.
Fifteen code examples of pci_dma_mapping_error are shown below, sorted by popularity by default.
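All fifteen examples follow the same basic pattern: obtain a bus address from one of the pci_map_* helpers, check it with pci_dma_mapping_error() before the device is allowed to use it, and unmap when the transfer is done. Below is a minimal sketch of that pattern; the helper name and buffer are hypothetical. Note also that the whole pci_dma_* wrapper API has since been deprecated and removed in favor of the generic DMA API (dma_map_single()/dma_mapping_error()), so new code should use those instead.

/* Minimal sketch of the map/check/unmap pattern (hypothetical helper).
 * A mapping can fail, e.g. when an IOMMU runs out of space, so the
 * returned handle must be checked before the hardware ever sees it.
 */
static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma_addr;

	dma_addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma_addr))
		return -EIO;

	/* ... point the device at dma_addr and run the transfer ... */

	pci_unmap_single(pdev, dma_addr, len, PCI_DMA_TODEVICE);
	return 0;
}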
Example 1: efx_init_rx_buffer_skb
/**
* efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
*
* @rx_queue: Efx RX queue
* @rx_buf: RX buffer structure to populate
*
* This allocates memory for a new receive buffer, maps it for DMA,
* and populates a struct efx_rx_buffer with the relevant
* information. Return a negative error code or 0 on success.
*/
static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
int skb_len = efx->rx_buffer_len;
rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
if (unlikely(!rx_buf->skb))
return -ENOMEM;
/* Adjust the SKB for padding and checksum */
skb_reserve(rx_buf->skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
rx_buf->data = (char *)rx_buf->skb->data;
rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
rx_buf->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
dev_kfree_skb_any(rx_buf->skb);
rx_buf->skb = NULL;
return -EIO;
}
return 0;
}
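The mapping made in efx_init_rx_buffer_skb() has to be undone when the buffer is released. The real sfc driver folds that into its shared unmap helpers; the following is only a sketch of what the pairing looks like, with a hypothetical helper name:

/* Hypothetical counterpart to efx_init_rx_buffer_skb(): unmap the DMA
 * address recorded in rx_buf, then free the skb.
 */
static void efx_fini_rx_buffer_skb(struct efx_nic *efx,
				   struct efx_rx_buffer *rx_buf)
{
	if (!rx_buf->skb)
		return;

	pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, rx_buf->len,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(rx_buf->skb);
	rx_buf->skb = NULL;
}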
Example 2: nvc0_fb_create
static int
nvc0_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nvc0_fb_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
pfb->priv = priv;
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c10_page) {
nvc0_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
nvc0_fb_destroy(dev);
return -EFAULT;
}
nouveau_irq_register(dev, 25, nvc0_mfb_isr);
return 0;
}
Example 3: xilly_map_single_pci
static dma_addr_t xilly_map_single_pci(struct xilly_cleanup *mem,
struct xilly_endpoint *ep,
void *ptr,
size_t size,
int direction
)
{
dma_addr_t addr = 0;
struct xilly_dma *this;
int pci_direction;
this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
if (!this)
return 0;
pci_direction = xilly_pci_direction(direction);
addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
this->direction = pci_direction;
if (pci_dma_mapping_error(ep->pdev, addr)) {
kfree(this);
return 0;
}
this->dma_addr = addr;
this->pdev = ep->pdev;
this->size = size;
list_add_tail(&this->node, &mem->to_unmap);
return addr;
}
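Each successful mapping is queued on mem->to_unmap, so teardown reduces to walking that list. Here is a sketch of the matching cleanup, assuming only the struct xilly_dma fields populated above (the helper name is hypothetical):

/* Walk mem->to_unmap and undo every mapping recorded by
 * xilly_map_single_pci(), releasing the bookkeeping entries as we go.
 */
static void xilly_unmap_all(struct xilly_cleanup *mem)
{
	struct xilly_dma *this, *tmp;

	list_for_each_entry_safe(this, tmp, &mem->to_unmap, node) {
		pci_unmap_single(this->pdev, this->dma_addr,
				 this->size, this->direction);
		list_del(&this->node);
		kfree(this);
	}
}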
Example 4: rtl8822be_tx_fill_special_desc
void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
u8 *pbd_desc, struct sk_buff *skb,
u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue;
u8 txdesc_len = 48;
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
return;
}
rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc, pdesc, hw_queue, skb,
mapping);
/* it should be BEACON_QUEUE or H2C_QUEUE,
* so skb=NULL is safe to assert
*/
fw_queue = _rtl8822be_map_hwqueue_to_fwqueue(NULL, hw_queue);
CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);
/* common part for BEACON and H2C */
SET_TX_DESC_TXPKTSIZE((u8 *)pdesc, (u16)(skb->len));
SET_TX_DESC_QSEL(pdesc, fw_queue);
if (hw_queue == H2C_QUEUE) {
/* fill H2C */
SET_TX_DESC_OFFSET(pdesc, 0);
} else {
/* fill beacon */
SET_TX_DESC_OFFSET(pdesc, txdesc_len);
SET_TX_DESC_DATARATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SW_SEQ(pdesc, 0);
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
SET_TX_DESC_LS(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 48);
SET_TX_DESC_USE_RATE(pdesc, 1);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
pdesc, txdesc_len);
}
Example 5: efx_init_rx_buffers_page
/**
* efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
*
* This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
* and populates a struct efx_rx_buffer for each one. Return a negative error
* code or 0 on success. If a single page can be split between two buffers,
* then the page will either be inserted fully, or not at all.
*/
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
void *page_addr;
unsigned int page_offset;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
/* We can split a page between two buffers */
BUILD_BUG_ON(EFX_RX_BATCH & 1);
for (count = 0; count < EFX_RX_BATCH; ++count) {
page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
dma_addr = pci_map_page(efx->pci_dev, page, 0,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
page_addr = page_address(page);
state = page_addr;
state->refcnt = 0;
state->dma_addr = dma_addr;
page_addr += sizeof(struct efx_rx_page_state);
dma_addr += sizeof(struct efx_rx_page_state);
page_offset = sizeof(struct efx_rx_page_state);
split:
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
rx_buf->is_page = true;
++rx_queue->added_count;
++rx_queue->alloc_page_count;
++state->refcnt;
if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
page_addr += (PAGE_SIZE >> 1);
page_offset += (PAGE_SIZE >> 1);
++count;
goto split;
}
	}

	return 0;
}
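Since up to two buffers can share one page and one DMA mapping, state->refcnt decides when the mapping may be torn down. Below is a sketch of the matching release path under that assumption; the real driver splits this logic across its unmap and free helpers.

/* Drop one buffer's claim on a shared page: unmap only when the last
 * buffer referencing the page goes away, then release the page ref
 * taken for this buffer.
 */
static void efx_fini_rx_buffer_page(struct efx_nic *efx,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);

	if (--state->refcnt == 0)
		pci_unmap_page(efx->pci_dev, state->dma_addr,
			       efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE);
	put_page(rx_buf->u.page);
	rx_buf->u.page = NULL;
}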
Example 6: radeon_ttm_tt_populate
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
return 0;
}
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_tt_populate(ttm);
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
}
#endif
#endif
r = ttm_pool_populate(ttm);
if (r) {
return r;
}
for (i = 0; i < ttm->num_pages; i++) {
gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
		while (i--) {
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
}
return 0;
}
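Note the unwind loop in the error path: after a failure at index i, while (i--) visits entries i-1 down to 0 and releases exactly what was set up, and it terminates correctly even when the very first mapping fails. The same idiom in isolation, with hypothetical setup_one()/teardown_one() helpers:

static int setup_one(unsigned int i);		/* hypothetical */
static void teardown_one(unsigned int i);	/* hypothetical */

/* The partial-failure unwind idiom: release entries i-1 ... 0 after a
 * failure at index i. "while (--i)" would skip entry 0 and underflow
 * when i == 0; "while (i--)" handles both cases.
 */
static int populate_all(unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (setup_one(i))
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		teardown_one(i);
	return -EFAULT;
}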
Example 7: sis_init_base_struct_addr
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
{
int rc;
void *base_struct_unaligned;
struct sis_base_struct *base_struct;
struct sis_sync_cmd_params params;
unsigned long error_buffer_paddr;
dma_addr_t bus_address;
base_struct_unaligned = kzalloc(sizeof(*base_struct)
+ SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
if (!base_struct_unaligned)
return -ENOMEM;
base_struct = PTR_ALIGN(base_struct_unaligned,
SIS_BASE_STRUCT_ALIGNMENT);
error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;
put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
put_unaligned_le32(lower_32_bits(error_buffer_paddr),
&base_struct->error_buffer_paddr_low);
put_unaligned_le32(upper_32_bits(error_buffer_paddr),
&base_struct->error_buffer_paddr_high);
put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
&base_struct->error_buffer_element_length);
put_unaligned_le32(ctrl_info->max_io_slots,
&base_struct->error_buffer_num_elements);
bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
sizeof(*base_struct), PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
rc = -ENOMEM;
goto out;
}
	memset(&params, 0, sizeof(params));
params.mailbox[1] = lower_32_bits((u64)bus_address);
params.mailbox[2] = upper_32_bits((u64)bus_address);
params.mailbox[3] = sizeof(*base_struct);
rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
		&params);
pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
PCI_DMA_TODEVICE);
out:
kfree(base_struct_unaligned);
return rc;
}
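The kzalloc()/PTR_ALIGN() pair above is the standard way to get stronger alignment than kmalloc() guarantees: over-allocate by alignment - 1 bytes, keep the original pointer for kfree(), and hand out the aligned one. A sketch of the idiom with hypothetical names:

/* Return a pointer aligned to "align" bytes inside a zeroed allocation;
 * *to_free receives the original pointer, which is what must be passed
 * to kfree() later.
 */
static void *alloc_aligned_zeroed(size_t size, size_t align, void **to_free)
{
	void *unaligned = kzalloc(size + align - 1, GFP_KERNEL);

	if (!unaligned)
		return NULL;
	*to_free = unaligned;
	return PTR_ALIGN(unaligned, align);
}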
Example 8: p54p_refill_rx_ring
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
struct sk_buff **rx_buf, u32 index)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
u32 limit, idx, i;
idx = le32_to_cpu(ring_control->host_idx[ring_index]);
limit = idx;
limit -= index;
limit = ring_limit - limit;
i = idx % ring_limit;
while (limit-- > 1) {
struct p54p_desc *desc = &ring[i];
if (!desc->host_addr) {
struct sk_buff *skb;
dma_addr_t mapping;
skb = dev_alloc_skb(priv->common.rx_mtu + 32);
if (!skb)
break;
mapping = pci_map_single(priv->pdev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
dev_kfree_skb_any(skb);
dev_err(&priv->pdev->dev,
"RX DMA Mapping error\n");
break;
}
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
desc->flags = 0;
rx_buf[i] = skb;
}
i++;
idx++;
i %= ring_limit;
}
wmb();
ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}
Example 9: tso_get_fragment
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
skb_frag_t *frag)
{
st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
st->unmap_single = false;
st->unmap_len = frag->size;
st->in_len = frag->size;
st->dma_addr = st->unmap_addr;
return 0;
}
return -ENOMEM;
}
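On current kernels this example would look different in two ways: skb_frag_t is opaque, so frag->page and friends are reached through accessor helpers, and the pci_dma_* wrappers are gone in favor of the generic DMA API. A sketch of the same function in that style (tso_state field names kept as above):

/* Same mapping on a modern kernel: accessor helpers instead of direct
 * skb_frag_t field access, dma_map_page() instead of the deprecated
 * pci_ wrapper. Sketch only.
 */
static int tso_get_fragment_modern(struct tso_state *st, struct efx_nic *efx,
				   skb_frag_t *frag)
{
	st->unmap_addr = dma_map_page(&efx->pci_dev->dev, skb_frag_page(frag),
				      skb_frag_off(frag), skb_frag_size(frag),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))
		return -ENOMEM;

	st->unmap_single = false;
	st->unmap_len = skb_frag_size(frag);
	st->in_len = skb_frag_size(frag);
	st->dma_addr = st->unmap_addr;
	return 0;
}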
Example 10: amdgpu_gart_dummy_page_init
/**
* amdgpu_gart_dummy_page_init - init dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
* Allocate the dummy page used by the driver (all asics).
* This dummy page is used by the driver as a filler for gart entries
* when pages are taken out of the GART
* Returns 0 on success, -ENOMEM on failure.
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
if (adev->dummy_page_addr)
return 0;
adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
adev->dummy_page_addr = 0;
return -ENOMEM;
}
return 0;
}
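The mirror-image teardown clears adev->dummy_page_addr so that a later init can run again. A sketch of that fini path, using only the fields seen above:

/* Unmap the dummy page and reset the cached address (all asics). */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page_addr, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	adev->dummy_page_addr = 0;
}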
Example 11: mlxsw_pci_wqe_frag_map
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
dma_addr_t mapaddr;
mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
return 0;
}
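The unmap side reads the address and byte count back out of the WQE itself. Assuming mlxsw_pci_wqe_address_get() and mlxsw_pci_wqe_byte_count_get() exist alongside the setters used above (an assumption; the mlxsw item macros normally generate both directions), the counterpart would be a sketch like:

/* Undo mlxsw_pci_wqe_frag_map(): recover the mapping parameters from
 * the WQE, then unmap.
 */
static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}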
Example 12: tso_get_fragment
/**
* tso_get_fragment - record fragment details and map for DMA
* @st: TSO state
* @efx: Efx NIC
* @len: Length of fragment
* @page: Page holding the fragment data
* @page_off: Offset of the fragment within @page
*
* Record fragment details and map for DMA. Return 0 on success, or
* -%ENOMEM if DMA mapping fails.
*/
static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
int len, struct page *page, int page_off)
{
st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
len, PCI_DMA_TODEVICE);
if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
st->ifc.unmap_len = len;
st->ifc.len = len;
st->ifc.dma_addr = st->ifc.unmap_addr;
st->ifc.page = page;
st->ifc.page_off = page_off;
return 0;
}
return -ENOMEM;
}
Example 13: tso_get_head_fragment
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
const struct sk_buff *skb)
{
int hl = st->header_len;
int len = skb_headlen(skb) - hl;
st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
len, PCI_DMA_TODEVICE);
if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
st->unmap_single = true;
st->unmap_len = len;
st->in_len = len;
st->dma_addr = st->unmap_addr;
return 0;
}
return -ENOMEM;
}
Example 14: radeon_ttm_tt_populate
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
if (ttm->state != tt_unpopulated)
return 0;
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_tt_populate(ttm);
}
#endif
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
}
#endif
r = ttm_pool_populate(ttm);
if (r) {
return r;
}
for (i = 0; i < ttm->num_pages; i++) {
gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
		while (i--) {
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
}
return 0;
}
Example 15: pearl_skb2rbd_attach
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
struct qtnf_pearl_rx_bd *rxbd;
struct sk_buff *skb;
dma_addr_t paddr;
skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
if (!skb) {
priv->rx_skb[index] = NULL;
return -ENOMEM;
}
priv->rx_skb[index] = skb;
rxbd = &ps->rx_bd_vbase[index];
paddr = pci_map_single(priv->pdev, skb->data,
SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev, paddr)) {
pr_err("skb DMA mapping error: %pad\n", &paddr);
return -ENOMEM;
}
/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
rxbd->info = 0x0;
priv->rx_bd_w_index = index;
/* sync up all descriptor updates */
wmb();
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
writel(QTN_HOST_HI32(paddr),
PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
#endif
writel(QTN_HOST_LO32(paddr),
PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
return 0;
}
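One detail worth flagging: when the mapping fails, the function returns with priv->rx_skb[index] still pointing at an skb that has no valid DMA mapping, and the skb itself is not freed. Whether the surrounding teardown already copes with this is driver-specific, but a more defensive version of that error branch might look like this sketch:

	if (pci_dma_mapping_error(priv->pdev, paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &paddr);
		dev_kfree_skb_any(skb);		/* don't leak the fresh skb */
		priv->rx_skb[index] = NULL;	/* no stale pointer left behind */
		return -ENOMEM;
	}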