This article collects typical usage examples of the pci_map_single function in C (Linux kernel) code. If you are unsure what pci_map_single does, how to call it, or where to find real uses of it, the hand-picked examples below should help.
The sections that follow show 15 code examples of pci_map_single, sorted by popularity by default.
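Before the individual examples, a minimal sketch of the canonical calling pattern may help. It assumes a hypothetical driver that already holds an enabled struct pci_dev *pdev, and uses only the legacy PCI DMA wrappers that appear throughout the examples below.

#include <linux/pci.h>

/* Minimal sketch: map a driver-owned buffer for a device read, use it,
 * then release the mapping. "pdev", "buf" and "len" are assumed to come
 * from the surrounding (hypothetical) driver. */
static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Make the buffer visible to the device over PCI. */
	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, handle))
		return -ENOMEM;	/* mapping failed; nothing to undo */

	/* ... program "handle" into the device and wait for completion ... */

	/* Release the mapping once the device is done with the buffer. */
	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}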
Example 1: ipath_map_single
/**
* ipath_map_single - a safety wrapper around pci_map_single()
*
* Same idea as ipath_map_page().
*/
dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
int direction)
{
dma_addr_t phys;
phys = pci_map_single(hwdev, ptr, size, direction);
if (phys == 0) {
pci_unmap_single(hwdev, phys, size, direction);
phys = pci_map_single(hwdev, ptr, size, direction);
/*
* FIXME: If we get 0 again, we should keep this page,
* map another, then free the 0 page.
*/
}
return phys;
}
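Note that comparing the returned handle against 0, as above, only works on platforms where bus address 0 is never handed out. Below is a hedged sketch of the check most later drivers use instead; example_map_single is an illustrative name, not part of the ipath driver.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: detect a failed mapping without assuming that 0 is special. */
static int example_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
			      int direction, dma_addr_t *phys)
{
	*phys = pci_map_single(hwdev, ptr, size, direction);

	/* The generic helper knows the platform's "bad address" encoding. */
	if (dma_mapping_error(&hwdev->dev, *phys))
		return -ENOMEM;

	return 0;
}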
Example 2: osl_dma_map
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction)
{
int dir;
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
return (pci_map_single(osh->pdev, va, size, dir));
}
Example 3: ivtv_udma_alloc
/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
if (itv->udma.SG_handle == 0) {
/* Map DMA Page Array Buffer */
itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
ivtv_udma_sync_for_cpu(itv);
}
}
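The SG-array mapping created above is a one-shot, to-device mapping; a matching teardown could look like the sketch below. The helper name is illustrative, and only the ivtv fields used above are assumed.

/* Sketch: release the SG array mapping set up in ivtv_udma_alloc(). */
static void example_ivtv_udma_free(struct ivtv *itv)
{
	if (itv->udma.SG_handle) {
		pci_unmap_single(itv->pdev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
		itv->udma.SG_handle = 0;
	}
}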
Example 4: rtl8822be_tx_fill_special_desc
void rtl8822be_tx_fill_special_desc(struct ieee80211_hw *hw, u8 *pdesc,
u8 *pbd_desc, struct sk_buff *skb,
u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue;
u8 txdesc_len = 48;
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "DMA mapping error");
return;
}
rtl8822be_pre_fill_tx_bd_desc(hw, pbd_desc, pdesc, hw_queue, skb,
mapping);
/* it should be BEACON_QUEUE or H2C_QUEUE,
* so skb=NULL is safe to assert
*/
fw_queue = _rtl8822be_map_hwqueue_to_fwqueue(NULL, hw_queue);
CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);
/* common part for BEACON and H2C */
SET_TX_DESC_TXPKTSIZE((u8 *)pdesc, (u16)(skb->len));
SET_TX_DESC_QSEL(pdesc, fw_queue);
if (hw_queue == H2C_QUEUE) {
/* fill H2C */
SET_TX_DESC_OFFSET(pdesc, 0);
} else {
/* fill beacon */
SET_TX_DESC_OFFSET(pdesc, txdesc_len);
SET_TX_DESC_DATARATE(pdesc, DESC_RATE1M);
SET_TX_DESC_SW_SEQ(pdesc, 0);
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
SET_TX_DESC_LS(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 48);
SET_TX_DESC_USE_RATE(pdesc, 1);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
pdesc, txdesc_len);
}
Example 5: alloc_list
/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int i;
np->cur_rx = np->cur_tx = 0;
np->old_rx = np->old_tx = 0;
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
sizeof (struct netdev_desc));
}
/* Initialize Rx descriptors */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
np->rx_skbuff[i] = NULL;
}
/* Allocate the rx buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
if (skb == NULL) {
printk (KERN_ERR
"%s: alloc_list: allocate Rx buffer error! ",
dev->name);
break;
}
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
np->pdev, skb->data, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
/* Set RFDListPtr */
writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
writel (0, dev->base_addr + RFDListPtr1);
return;
}
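alloc_list() leaves every Rx buffer mapped with PCI_DMA_FROMDEVICE, so a teardown path has to unmap them again before the skbs are freed. Here is a hedged sketch against the same netdev_private fields; example_free_list is an illustrative name, not taken from the driver.

/* Sketch: undo the Rx half of alloc_list() before freeing the rings. */
static void example_free_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_skbuff[i];

		if (!skb)
			continue;

		/* fraginfo holds the bus address in its low 48 bits. */
		pci_unmap_single(np->pdev,
				 le64_to_cpu(np->rx_ring[i].fraginfo) & DMA_BIT_MASK(48),
				 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		np->rx_skbuff[i] = NULL;
		np->rx_ring[i].fraginfo = 0;
	}
}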
Example 6: dma_map_single
dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
if (dev->bus == &vio_bus_type)
return vio_map_single(to_vio_dev(dev), cpu_addr, size, direction);
BUG();
return (dma_addr_t)0;
}
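From a PCI driver's point of view this dispatcher is reached through the generic DMA API; the hedged fragment below shows such a call (buffer names are placeholders) ending up in pci_map_single() via the bus check above.

/* Sketch: a PCI driver calling the generic wrapper; dev->bus == &pci_bus_type
 * routes the request to pci_map_single(). */
static dma_addr_t example_map_via_generic_api(struct pci_dev *pdev,
					      void *buf, size_t len)
{
	return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
}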
Example 7: alloc_list
static void
alloc_list (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int i;
np->cur_rx = np->cur_tx = 0;
np->old_rx = np->old_tx = 0;
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
sizeof (struct netdev_desc));
}
/* Initialize Rx descriptors */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
np->rx_skbuff[i] = NULL;
}
/* Allocate the rx buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
if (skb == NULL) {
printk (KERN_ERR
"%s: alloc_list: allocate Rx buffer error! ",
dev->name);
break;
}
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
np->pdev, skb->data, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
/* Set RFDListPtr */
writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
writel (0, dev->base_addr + RFDListPtr1);
}
Example 8: alloc_buffer
static int alloc_buffer(struct vino_device *v, int size)
{
int count, i, j, err;
err = i = 0;
count = (size / PAGE_SIZE + 4) & ~3;
v->desc = (unsigned long *) kmalloc(count * sizeof(unsigned long),
GFP_KERNEL);
if (!v->desc)
return -ENOMEM;
v->dma_desc.cpu = pci_alloc_consistent(NULL, PAGE_RATIO * (count+4) *
sizeof(dma_addr_t),
&v->dma_desc.dma);
if (!v->dma_desc.cpu) {
err = -ENOMEM;
goto out_free_desc;
}
while (i < count) {
dma_addr_t dma;
v->desc[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!v->desc[i])
break;
dma = pci_map_single(NULL, (void *)v->desc[i], PAGE_SIZE,
PCI_DMA_FROMDEVICE);
for (j = 0; j < PAGE_RATIO; j++)
v->dma_desc.cpu[PAGE_RATIO * i + j ] =
dma + VINO_PAGE_SIZE * j;
mem_map_reserve(virt_to_page(v->desc[i]));
i++;
}
v->dma_desc.cpu[PAGE_RATIO * count] = VINO_DESC_STOP;
if (i-- < count) {
while (i >= 0) {
mem_map_unreserve(virt_to_page(v->desc[i]));
pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
PAGE_SIZE, PCI_DMA_FROMDEVICE);
free_page(v->desc[i]);
i--;
}
pci_free_consistent(NULL,
PAGE_RATIO * (count+4) * sizeof(dma_addr_t),
(void *)v->dma_desc.cpu, v->dma_desc.dma);
err = -ENOBUFS;
goto out_free_desc;
}
v->page_count = count;
return 0;
out_free_desc:
kfree(v->desc);
return err;
}
Example 9: epic_start_xmit
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct epic_private *ep = dev->priv;
int entry, free_count;
u32 ctrl_word;
unsigned long flags;
if (skb_padto(skb, ETH_ZLEN))
return 0;
/* Caution: the write order is important here, set the field with the
"ownership" bit last. */
/* Calculate the next Tx descriptor entry. */
spin_lock_irqsave(&ep->lock, flags);
free_count = ep->cur_tx - ep->dirty_tx;
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
} else if (free_count == TX_QUEUE_LEN/2) {
ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
} else if (free_count < TX_QUEUE_LEN - 1) {
ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
} else {
/* Leave room for an additional entry. */
ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
ep->tx_full = 1;
}
ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
ep->tx_ring[entry].txstatus =
((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
| cpu_to_le32(DescOwn);
ep->cur_tx++;
if (ep->tx_full)
netif_stop_queue(dev);
spin_unlock_irqrestore(&ep->lock, flags);
/* Trigger an immediate transmit demand. */
outl(TxQueued, dev->base_addr + COMMAND);
dev->trans_start = jiffies;
if (debug > 4)
printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
"flag %2.2x Tx status %8.8x.\n",
dev->name, (int)skb->len, entry, ctrl_word,
(int)inl(dev->base_addr + TxSTAT));
return 0;
}
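Each descriptor mapped in epic_start_xmit() has to be unmapped again once the NIC clears DescOwn. A hedged sketch of that reclaim loop, using only fields visible above, follows; example_epic_tx_reclaim is an illustrative name and would typically run from the Tx interrupt.

/* Sketch: reclaim transmitted descriptors, undoing the pci_map_single()
 * calls made in epic_start_xmit(). */
static void example_epic_tx_reclaim(struct net_device *dev,
				    struct epic_private *ep)
{
	while (ep->dirty_tx != ep->cur_tx) {
		int entry = ep->dirty_tx % TX_RING_SIZE;
		struct sk_buff *skb = ep->tx_skbuff[entry];

		/* Stop at the first descriptor the NIC still owns. */
		if (le32_to_cpu(ep->tx_ring[entry].txstatus) & DescOwn)
			break;

		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
		ep->dirty_tx++;
	}

	if (ep->tx_full && ep->cur_tx - ep->dirty_tx < TX_QUEUE_LEN - 2) {
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}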
Example 10: pci_eth_start_xmit
static netdev_tx_t pci_eth_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct pci_eth_private *priv = netdev_priv(dev);
struct pci_eth_descriptor *descptr;
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
unsigned long flags;
/* Critical Section */
spin_lock_irqsave(&priv->lock, flags);
/* TX resource check */
if (!priv->tx_free_desc) {
spin_unlock_irqrestore(&priv->lock, flags);
netif_stop_queue(dev);
netdev_err(dev, ": no tx descriptor\n");
return NETDEV_TX_BUSY;
}
/* Statistic Counter */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
/* Decrement free descriptors counter */
priv->tx_free_desc--;
/* Set TX descriptor & Transmit it */
descptr = priv->tx_insert_ptr;
if (skb->len < ETH_ZLEN)
descptr->len = ETH_ZLEN;
else
descptr->len = skb->len;
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
descptr->status = DSC_OWNER_MAC;
skb_tx_timestamp(skb);
/* TODO: Trigger the MAC to check the TX descriptor - start DMA
* transaction.
*/
/* After DMA transaction perform the following check */
/* If no tx resource, stop */
if (!priv->tx_free_desc)
netif_stop_queue(dev);
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
}
Example 11: ad1889_start_wav
static void ad1889_start_wav(ad1889_state_t *state)
{
unsigned long flags;
struct dmabuf *dmabuf = &state->dmabuf;
int cnt;
u16 tmp;
spin_lock_irqsave(&state->card->lock, flags);
if (dmabuf->dma_len) /* DMA already in flight */
goto skip_dma;
/* setup dma */
cnt = dmabuf->wr_ptr - dmabuf->rd_ptr;
if (cnt == 0) /* done - don't need to do anything */
goto skip_dma;
/* If the wr_ptr has wrapped, only map to the end */
if (cnt < 0)
cnt = DMA_SIZE - dmabuf->rd_ptr;
dmabuf->dma_handle = pci_map_single(ad1889_dev->pci,
dmabuf->rawbuf + dmabuf->rd_ptr,
cnt, PCI_DMA_TODEVICE);
dmabuf->dma_len = cnt;
dmabuf->ready = 1;
DBG("Starting playback at 0x%p for %ld bytes\n", dmabuf->rawbuf +
dmabuf->rd_ptr, dmabuf->dma_len);
/* load up the current register set */
AD1889_WRITEL(ad1889_dev, AD_DMAWAVCC, cnt);
AD1889_WRITEL(ad1889_dev, AD_DMAWAVICC, cnt);
AD1889_WRITEL(ad1889_dev, AD_DMAWAVCA, dmabuf->dma_handle);
/* TODO: for now we load the base registers with the same thing */
AD1889_WRITEL(ad1889_dev, AD_DMAWAVBC, cnt);
AD1889_WRITEL(ad1889_dev, AD_DMAWAVIBC, cnt);
AD1889_WRITEL(ad1889_dev, AD_DMAWAVBA, dmabuf->dma_handle);
/* and we're off to the races... */
AD1889_WRITEL(ad1889_dev, AD_DMACHSS, 0x8);
tmp = AD1889_READW(ad1889_dev, AD_DSWSMC);
tmp |= 0x0400; /* set WAEN */
AD1889_WRITEW(ad1889_dev, AD_DSWSMC, tmp);
(void) AD1889_READW(ad1889_dev, AD_DSWSMC); /* flush posted PCI write */
dmabuf->enable |= DAC_RUNNING;
skip_dma:
spin_unlock_irqrestore(&state->card->lock, flags);
}
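The mapping above covers only the chunk between rd_ptr and the end of the raw buffer, so it has to be released when that DMA completes (or playback stops) before the next chunk can be mapped. A hedged sketch, reusing the fields shown above (the helper name is illustrative):

/* Sketch: release the streaming mapping set up in ad1889_start_wav(),
 * typically from the WAV DMA-complete interrupt or on stop. */
static void example_ad1889_stop_wav_dma(ad1889_state_t *state)
{
	struct dmabuf *dmabuf = &state->dmabuf;

	if (!dmabuf->dma_len)
		return;

	pci_unmap_single(ad1889_dev->pci, dmabuf->dma_handle,
			 dmabuf->dma_len, PCI_DMA_TODEVICE);
	dmabuf->rd_ptr = (dmabuf->rd_ptr + dmabuf->dma_len) % DMA_SIZE;
	dmabuf->dma_len = 0;
}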
Example 12: rtl8169_start_xmit
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct rtl8169_private *tp = dev->priv;
void *ioaddr = tp->mmio_addr;
int entry = tp->cur_tx % NUM_TX_DESC;
u32 len = skb->len;
if (unlikely(skb->len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
goto err_update_stats;
len = ETH_ZLEN;
}
spin_lock_irq(&tp->lock);
if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
dma_addr_t mapping;
mapping = pci_map_single(tp->pci_dev, skb->data, len,
PCI_DMA_TODEVICE);
tp->Tx_skbuff[entry] = skb;
tp->TxDescArray[entry].addr = cpu_to_le64(mapping);
tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));
RTL_W8(TxPoll, 0x40); //set polling bit
dev->trans_start = jiffies;
tp->cur_tx++;
} else
goto err_drop;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
netif_stop_queue(dev);
}
out:
spin_unlock_irq(&tp->lock);
return 0;
err_drop:
dev_kfree_skb(skb);
err_update_stats:
tp->stats.tx_dropped++;
goto out;
}
Example 13: islpci_mgmt_rx_fill
/*
* Fill the receive queue for management frames with fresh buffers.
*/
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
islpci_private *priv = netdev_priv(ndev);
isl38xx_control_block *cb = /* volatile not needed */
(isl38xx_control_block *) priv->control_block;
u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif
while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
struct islpci_membuf *buf = &priv->mgmt_rx[index];
isl38xx_fragment *frag = &cb->rx_data_mgmt[index];
if (buf->mem == NULL) {
buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
if (!buf->mem) {
printk(KERN_WARNING "islpci_mgmt_rx_fill: could not allocate management frame buffer\n");
return -ENOMEM;
}
buf->size = MGMT_FRAME_SIZE;
}
if (buf->pci_addr == 0) {
buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
MGMT_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
if (!buf->pci_addr) {
printk(KERN_WARNING "islpci_mgmt_rx_fill: failed to map management frame buffer\n");
return -ENOMEM;
}
}
/* be safe: always reset control block information */
frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
frag->flags = 0;
frag->address = cpu_to_le32(buf->pci_addr);
curr++;
/* The fragment address in the control block must have
* been written before announcing the frame buffer to
* device */
wmb();
cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
}
return 0;
}
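On the consume side, the mapping has to be released before the CPU inspects the frame, and the slot marked empty so that islpci_mgmt_rx_fill() maps a fresh buffer on its next pass. A hedged sketch (the helper name is illustrative):

/* Sketch: consume one management frame and release its DMA mapping so
 * that islpci_mgmt_rx_fill() will map a fresh buffer into the slot. */
static void example_mgmt_rx_consume(islpci_private *priv, u32 index)
{
	struct islpci_membuf *buf = &priv->mgmt_rx[index % ISL38XX_CB_MGMT_QSIZE];

	pci_unmap_single(priv->pdev, buf->pci_addr, MGMT_FRAME_SIZE,
			 PCI_DMA_FROMDEVICE);
	buf->pci_addr = 0;	/* tells the fill routine to remap */

	/* ... hand buf->mem (now coherent for the CPU) to the stack ... */
}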
Example 14: sis_init_base_struct_addr
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
{
int rc;
void *base_struct_unaligned;
struct sis_base_struct *base_struct;
struct sis_sync_cmd_params params;
unsigned long error_buffer_paddr;
dma_addr_t bus_address;
base_struct_unaligned = kzalloc(sizeof(*base_struct)
+ SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
if (!base_struct_unaligned)
return -ENOMEM;
base_struct = PTR_ALIGN(base_struct_unaligned,
SIS_BASE_STRUCT_ALIGNMENT);
error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;
put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
put_unaligned_le32(lower_32_bits(error_buffer_paddr),
&base_struct->error_buffer_paddr_low);
put_unaligned_le32(upper_32_bits(error_buffer_paddr),
&base_struct->error_buffer_paddr_high);
put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
&base_struct->error_buffer_element_length);
put_unaligned_le32(ctrl_info->max_io_slots,
&base_struct->error_buffer_num_elements);
bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
sizeof(*base_struct), PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
rc = -ENOMEM;
goto out;
}
memset(&params, 0, sizeof(params));
params.mailbox[1] = lower_32_bits((u64)bus_address);
params.mailbox[2] = upper_32_bits((u64)bus_address);
params.mailbox[3] = sizeof(*base_struct);
rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
&params);
pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
PCI_DMA_TODEVICE);
out:
kfree(base_struct_unaligned);
return rc;
}
Example 15: p54p_refill_rx_ring
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
struct sk_buff **rx_buf, u32 index)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
u32 limit, idx, i;
idx = le32_to_cpu(ring_control->host_idx[ring_index]);
limit = idx;
limit -= index;
limit = ring_limit - limit;
i = idx % ring_limit;
while (limit-- > 1) {
struct p54p_desc *desc = &ring[i];
if (!desc->host_addr) {
struct sk_buff *skb;
dma_addr_t mapping;
skb = dev_alloc_skb(priv->common.rx_mtu + 32);
if (!skb)
break;
mapping = pci_map_single(priv->pdev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
dev_kfree_skb_any(skb);
dev_err(&priv->pdev->dev,
"RX DMA Mapping error\n");
break;
}
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
desc->flags = 0;
rx_buf[i] = skb;
}
i++;
idx++;
i %= ring_limit;
}
wmb();
ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}