本文整理汇总了C++中dev_alloc_skb函数的典型用法代码示例。如果您正苦于以下问题:C++ dev_alloc_skb函数的具体用法?C++ dev_alloc_skb怎么用?C++ dev_alloc_skb使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dev_alloc_skb函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: usb_in_complete
/*
 * Decode frames received on the B/D channel.
 * Note that this function will be called continuously
 * with 64Kbit/s / 16Kbit/s of data and hence it will be
 * called 50 times per second with 20 ISOC descriptors.
 * Called at interrupt.
 */
static void usb_in_complete(struct urb *urb)
{
struct st5481_in *in = urb->context;
unsigned char *ptr;
struct sk_buff *skb;
int len, count, status;
/* URB killed / device gone: do not resubmit, just bail out */
if (unlikely(urb->status < 0)) {
switch (urb->status) {
case -ENOENT:
case -ESHUTDOWN:
case -ECONNRESET:
DBG(1,"urb killed status %d", urb->status);
return; // Give up
default:
WARNING("urb status %d",urb->status);
break;
}
}
DBG_ISO_PACKET(0x80,urb);
/* Collapse the ISOC descriptors into one contiguous buffer */
len = st5481_isoc_flatten(urb);
ptr = urb->transfer_buffer;
while (len > 0) {
if (in->mode == L1_MODE_TRANS) {
/* Transparent mode: pass the raw bytes up unframed */
memcpy(in->rcvbuf, ptr, len);
status = len;
len = 0;
} else {
/* HDLC mode: decode; count = input bytes consumed this pass,
 * status > 0 = decoded frame length, status < 0 = error code */
status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count,
in->rcvbuf, in->bufsize);
ptr += count;
len -= count;
}
if (status > 0) {
// Good frame received
DBG(4,"count=%d",status);
DBG_PACKET(0x400, in->rcvbuf, status);
if (!(skb = dev_alloc_skb(status))) {
WARNING("receive out of memory\n");
break;
}
memcpy(skb_put(skb, status), in->rcvbuf, status);
/* Deliver the frame to layer 2 via the HiSax interface */
in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
} else if (status == -HDLC_CRC_ERROR) {
INFO("CRC error");
} else if (status == -HDLC_FRAMING_ERROR) {
INFO("framing error");
} else if (status == -HDLC_LENGTH_ERROR) {
INFO("length error");
}
}
// Prepare URB for next transfer
urb->dev = in->adapter->usb_dev;
urb->actual_length = 0;
SUBMIT_URB(urb, GFP_ATOMIC);
}
示例2: mesh_path_error_tx
/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @sdata: local mesh subif
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path. To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 *
 * Returns: 0 on success, -EAGAIN when rate-limited by
 * dot11MeshHWMPperrMinInterval, or -1 if the skb could not be allocated.
 */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
u8 ttl, const u8 *target, u32 target_sn,
u16 target_rcode, const u8 *ra)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mgmt *mgmt;
u8 *pos, ie_len;
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
sizeof(mgmt->u.action.u.mesh_action);
/* Rate-limit PERR transmissions */
if (time_before(jiffies, ifmsh->next_perr))
return -EAGAIN;
/* Headroom + crypto overhead + action header + PERR IE
 * (2 bytes ID/len + 15 bytes payload for one destination) */
skb = dev_alloc_skb(local->tx_headroom +
sdata->encrypt_headroom +
IEEE80211_ENCRYPT_TAILROOM +
hdr_len +
2 + 15 /* PERR IE */);
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
mgmt->u.action.u.mesh_action.action_code =
WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
/* Build the PERR information element byte by byte */
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
*pos++ = ie_len;
/* ttl */
*pos++ = ttl;
/* number of destinations */
*pos++ = 1;
/*
 * flags bit, bit 1 is unset if we know the sequence number and
 * bit 2 is set if we have a reason code
 */
*pos = 0;
if (!target_sn)
*pos |= MP_F_USN;
if (target_rcode)
*pos |= MP_F_RCODE;
pos++;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
put_unaligned_le16(target_rcode, pos);
/* see note in function header */
prepare_frame_for_deferred_tx(sdata, skb);
ifmsh->next_perr = TU_TO_EXP_TIME(
ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
ieee80211_add_pending_skb(local, skb);
return 0;
}
示例3: cmpk_message_handle_tx
/*
 * Send a firmware command packet to the hardware over the normal TX path,
 * splitting @code_virtual_address (@buffer_len bytes) into fragments no
 * larger than the firmware's cmdpacket_frag_thresold.  Each fragment gets
 * tx_headroom bytes of fwinfo descriptor (first 8 bytes filled with 0x12,
 * rest zeroed) and is queued on TXCMD_QUEUE via softmac_hard_start_xmit().
 * (@packettype is accepted but not used in this function.)
 *
 * Returns true on success, false if an skb allocation failed.  Fragments
 * already handed to the TX path before a failure are not recalled, and
 * the TPPoll kick only happens on the fully successful path.
 */
bool cmpk_message_handle_tx(
struct net_device *dev,
u8 *code_virtual_address,
u32 packettype,
u32 buffer_len)
{
bool rt_status = true;
struct r8192_priv *priv = rtllib_priv(dev);
u16 frag_threshold;
u16 frag_length = 0, frag_offset = 0;
struct rt_firmware *pfirmware = priv->pFirmware;
struct sk_buff *skb;
unsigned char *seg_ptr;
struct cb_desc *tcb_desc;
u8 bLastIniPkt;
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, buffer_len);
firmware_init_param(dev);
frag_threshold = pfirmware->cmdpacket_frag_thresold;
do {
/* Last fragment carries the remainder and is flagged as last */
if ((buffer_len - frag_offset) > frag_threshold) {
frag_length = frag_threshold ;
bLastIniPkt = 0;
} else {
frag_length = (u16)(buffer_len - frag_offset);
bLastIniPkt = 1;
}
/* NOTE(review): purpose of the extra 4 tail bytes is not
 * visible here - presumably hardware padding; confirm */
skb = dev_alloc_skb(frag_length +
priv->rtllib->tx_headroom + 4);
if (skb == NULL) {
rt_status = false;
goto Failed;
}
/* Stash the net_device pointer in skb->cb; the cb_desc used
 * by the TX path lives right after it */
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
tcb_desc->bLastIniPkt = bLastIniPkt;
tcb_desc->pkt_size = frag_length;
/* The headroom doubles as the firmware info descriptor */
seg_ptr = skb_put(skb, priv->rtllib->tx_headroom);
pTxFwInfo = (struct tx_fwinfo_8190pci *)seg_ptr;
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
memset(pTxFwInfo, 0x12, 8);
/* Copy this fragment's payload and hand it to the TX path */
seg_ptr = skb_put(skb, frag_length);
memcpy(seg_ptr, code_virtual_address, (u32)frag_length);
priv->rtllib->softmac_hard_start_xmit(skb, dev);
code_virtual_address += frag_length;
frag_offset += frag_length;
} while (frag_offset < buffer_len);
/* Tell the hardware to poll the command queue */
write_nic_byte(dev, TPPoll, TPPoll_CQ);
Failed:
return rt_status;
}
示例4: rt2x00mac_tx_rts_cts
/*
 * Build and queue an RTS or CTS-to-self frame protecting @frag_skb.
 * CTS-to-self is chosen when the first TX rate carries the CTS_PROTECT
 * flag, RTS otherwise.  The protection frame inherits the original
 * frame's TX control info (with RTS/CTS flags stripped and hardware
 * encryption disabled) and is written to @queue.
 *
 * Returns 0 on success, -ENOMEM if the skb could not be allocated, or
 * the error from rt2x00queue_write_tx_frame() (skb freed in that case).
 */
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue,
struct sk_buff *frag_skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
struct ieee80211_tx_info *rts_info;
struct sk_buff *skb;
unsigned int data_length;
int retval = 0;
if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
data_length = sizeof(struct ieee80211_cts);
else
data_length = sizeof(struct ieee80211_rts);
skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
if (unlikely(!skb)) {
rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
return -ENOMEM;
}
skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
skb_put(skb, data_length);
/*
 * Copy TX information over from original frame to
 * RTS/CTS frame. Note that we set the no encryption flag
 * since we don't want this frame to be encrypted.
 * RTS frames should be acked, while CTS-to-self frames
 * should not. The ready for TX flag is cleared to prevent
 * it being automatically sent when the descriptor is
 * written to the hardware.
 */
memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
rts_info = IEEE80211_SKB_CB(skb);
rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;
if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
else
rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
/* Disable hardware encryption */
rts_info->control.hw_key = NULL;
/*
 * RTS/CTS frame should use the length of the frame plus any
 * encryption overhead that will be added by the hardware.
 */
data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
frag_skb->data, data_length, tx_info,
(struct ieee80211_cts *)(skb->data));
else
ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
frag_skb->data, data_length, tx_info,
(struct ieee80211_rts *)(skb->data));
retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
if (retval) {
dev_kfree_skb_any(skb);
rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
}
return retval;
}
示例5: mace_rx
/* ----------------------------------------------------------------------------
mace_rx
Receives packets.
---------------------------------------------------------------------------- */
static int mace_rx(struct net_device *dev, unsigned char RxCnt)
{
mace_private *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
unsigned char rx_framecnt;
unsigned short rx_status;
while (
((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
(rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
(RxCnt--)
) {
rx_status = inw(ioaddr + AM2150_RCV);
pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
" 0x%X.\n", dev->name, rx_framecnt, rx_status);
if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
lp->linux_stats.rx_errors++;
if (rx_status & MACE_RCVFS_OFLO) {
lp->mace_stats.oflo++;
}
if (rx_status & MACE_RCVFS_CLSN) {
lp->mace_stats.clsn++;
}
if (rx_status & MACE_RCVFS_FRAM) {
lp->mace_stats.fram++;
}
if (rx_status & MACE_RCVFS_FCS) {
lp->mace_stats.fcs++;
}
} else {
short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
/* Auto Strip is off, always subtract 4 */
struct sk_buff *skb;
lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
/* runt packet count */
lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
/* rcv collision count */
pr_debug(" receiving packet size 0x%X rx_status"
" 0x%X.\n", pkt_len, rx_status);
skb = dev_alloc_skb(pkt_len+2);
if (skb != NULL) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
*(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
lp->linux_stats.rx_packets++;
lp->linux_stats.rx_bytes += pkt_len;
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
continue;
} else {
pr_debug("%s: couldn't allocate a sk_buff of size"
" %d.\n", dev->name, pkt_len);
lp->linux_stats.rx_dropped++;
}
}
示例6: myri_rx
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
struct recvq __iomem *rq = mp->rq;
struct recvq __iomem *rqa = mp->rqack;
int entry = sbus_readl(&rqa->head);
int limit = sbus_readl(&rqa->tail);
int drops;
DRX(("entry[%d] limit[%d] ", entry, limit));
if (entry == limit)
return;
drops = 0;
DRX(("\n"));
while (entry != limit) {
struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
u32 csum = sbus_readl(&rxdack->csum);
int len = sbus_readl(&rxdack->myri_scatters[0].len);
int index = sbus_readl(&rxdack->ctx);
struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
struct sk_buff *skb = mp->rx_skbs[index];
/* Ack it. */
sbus_writel(NEXT_RX(entry), &rqa->head);
/* Check for errors. */
DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
dma_sync_single_for_cpu(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
DRX(("ERROR["));
dev->stats.rx_errors++;
if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
DRX(("BAD_LENGTH] "));
dev->stats.rx_length_errors++;
} else {
DRX(("NO_PADDING] "));
dev->stats.rx_frame_errors++;
}
/* Return it to the LANAI. */
drop_it:
drops++;
DRX(("DROP "));
dev->stats.rx_dropped++;
dma_sync_single_for_device(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
sbus_writel(index, &rxd->ctx);
sbus_writel(1, &rxd->num_sg);
sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
goto next;
}
DRX(("len[%d] ", len));
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
u32 dma_addr;
DRX(("BIGBUFF "));
new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
if (new_skb == NULL) {
DRX(("skb_alloc(FAILED) "));
goto drop_it;
}
dma_unmap_single(&mp->myri_op->dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
mp->rx_skbs[index] = new_skb;
new_skb->dev = dev;
skb_put(new_skb, RX_ALLOC_SIZE);
dma_addr = dma_map_single(&mp->myri_op->dev,
new_skb->data,
RX_ALLOC_SIZE,
DMA_FROM_DEVICE);
sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
sbus_writel(index, &rxd->ctx);
sbus_writel(1, &rxd->num_sg);
sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
/* Trim the original skb for the netif. */
DRX(("trim(%d) ", len));
skb_trim(skb, len);
} else {
struct sk_buff *copy_skb = dev_alloc_skb(len);
DRX(("SMALLBUFF "));
if (copy_skb == NULL) {
DRX(("dev_alloc_skb(FAILED) "));
goto drop_it;
}
/* DMA sync already done above. */
copy_skb->dev = dev;
DRX(("resv_and_put "));
skb_put(copy_skb, len);
skb_copy_from_linear_data(skb, copy_skb->data, len);
//.........这里部分代码省略.........
示例7: kingsun_net_open
static int kingsun_net_open(struct net_device *netdev)
{
struct kingsun_cb *kingsun = netdev_priv(netdev);
int err = -ENOMEM;
char hwname[16];
kingsun->receiving = 0;
kingsun->rx_buff.in_frame = FALSE;
kingsun->rx_buff.state = OUTSIDE_FRAME;
kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
if (!kingsun->rx_buff.skb)
goto free_mem;
skb_reserve(kingsun->rx_buff.skb, 1);
kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
do_gettimeofday(&kingsun->rx_time);
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->rx_urb)
goto free_mem;
kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kingsun->tx_urb)
goto free_mem;
sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
if (!kingsun->irlap) {
err("kingsun-sir: irlap_open failed");
goto free_mem;
}
usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
kingsun->in_buf, kingsun->max_rx,
kingsun_rcv_irq, kingsun, 1);
kingsun->rx_urb->status = 0;
err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
if (err) {
err("kingsun-sir: first urb-submit failed: %d", err);
goto close_irlap;
}
netif_start_queue(netdev);
return 0;
close_irlap:
irlap_close(kingsun->irlap);
free_mem:
if (kingsun->tx_urb) {
usb_free_urb(kingsun->tx_urb);
kingsun->tx_urb = NULL;
}
if (kingsun->rx_urb) {
usb_free_urb(kingsun->rx_urb);
kingsun->rx_urb = NULL;
}
if (kingsun->rx_buff.skb) {
kfree_skb(kingsun->rx_buff.skb);
kingsun->rx_buff.skb = NULL;
kingsun->rx_buff.head = NULL;
}
return err;
}
示例8: Bchan_rcv_bh
/*
 * Bottom half for B-channel receive on the amd7930.
 *
 * Drains the circular receive buffer (hw->rv_buff, split into RCV_BUFBLKS
 * blocks of RCV_BUFSIZE/RCV_BUFBLKS bytes) block by block until the write
 * index is caught up.  In HDLC mode each block is fed through the software
 * HDLC decoder and completed frames are queued on bcs->rqueue; in
 * transparent mode the raw block is queued as-is.  Queued frames are
 * announced via the B_RCVBUFREADY event and the immediate task queue.
 *
 * Fix: the large-packet OOM printk was missing its trailing "\n"
 * (every sibling message has one), which would merge it with the next
 * kernel log line.
 */
static void
Bchan_rcv_bh(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	struct amd7930_hw *hw = &bcs->hw.amd7930;
	struct sk_buff *skb;
	int len;

	if (cs->debug & L1_DEB_HSCX) {
		char tmp[1024];

		sprintf(tmp, "amd7930_Bchan_rcv (%d/%d)",
			hw->rv_buff_in, hw->rv_buff_out);
		debugl1(cs, tmp);
		QuickHex(tmp, hw->rv_buff + hw->rv_buff_out,
			 RCV_BUFSIZE/RCV_BUFBLKS);
		debugl1(cs, tmp);
	}
	do {
		if (bcs->mode == L1_MODE_HDLC) {
			/* len > 0: a complete frame of len bytes was decoded
			 * into hw->rv_skb; len < 0: reception error */
			while ((len = read_raw_hdlc_data(hw->hdlc_state,
					hw->rv_buff + hw->rv_buff_out, RCV_BUFSIZE/RCV_BUFBLKS,
					hw->rv_skb->tail, HSCX_BUFMAX))) {
				if (len > 0 && (cs->debug & L1_DEB_HSCX_FIFO)) {
					char tmp[1024];
					char *t = tmp;

					t += sprintf(t, "amd7930_Bchan_rcv %c cnt %d", bcs->channel ? 'B' : 'A', len);
					QuickHex(t, hw->rv_skb->tail, len);
					debugl1(cs, tmp);
				}
				if (len > HSCX_BUFMAX/2) {
					/* Large packet received: hand rv_skb itself
					 * up and install a fresh replacement */
					if (!(skb = dev_alloc_skb(HSCX_BUFMAX))) {
						printk(KERN_WARNING "amd7930: receive out of memory\n");
					} else {
						skb_put(hw->rv_skb, len);
						skb_queue_tail(&bcs->rqueue, hw->rv_skb);
						hw->rv_skb = skb;
						bcs->event |= 1 << B_RCVBUFREADY;
						queue_task(&bcs->tqueue, &tq_immediate);
					}
				} else if (len > 0) {
					/* Small packet received: copy into a
					 * right-sized skb, rv_skb stays in place */
					if (!(skb = dev_alloc_skb(len))) {
						printk(KERN_WARNING "amd7930: receive out of memory\n");
					} else {
						memcpy(skb_put(skb, len), hw->rv_skb->tail, len);
						skb_queue_tail(&bcs->rqueue, skb);
						bcs->event |= 1 << B_RCVBUFREADY;
						queue_task(&bcs->tqueue, &tq_immediate);
						mark_bh(IMMEDIATE_BH);
					}
				} else {
					/* Reception Error */
					/* printk("amd7930: B channel receive error\n"); */
				}
			}
		} else if (bcs->mode == L1_MODE_TRANS) {
			/* Transparent mode: queue the whole block unframed */
			if (!(skb = dev_alloc_skb(RCV_BUFSIZE/RCV_BUFBLKS))) {
				printk(KERN_WARNING "amd7930: receive out of memory\n");
			} else {
				memcpy(skb_put(skb, RCV_BUFSIZE/RCV_BUFBLKS),
				       hw->rv_buff + hw->rv_buff_out,
				       RCV_BUFSIZE/RCV_BUFBLKS);
				skb_queue_tail(&bcs->rqueue, skb);
				bcs->event |= 1 << B_RCVBUFREADY;
				queue_task(&bcs->tqueue, &tq_immediate);
				mark_bh(IMMEDIATE_BH);
			}
		}
		if (hw->rv_buff_in == hw->rv_buff_out) {
			/* Buffer was filled up - need to restart receiver */
			amd7930_brecv(0, bcs->channel,
				      hw->rv_buff + hw->rv_buff_in,
				      RCV_BUFSIZE/RCV_BUFBLKS,
				      (void *) &Bchan_recv_callback,
				      (void *) bcs);
		}
		hw->rv_buff_out += RCV_BUFSIZE/RCV_BUFBLKS;
		hw->rv_buff_out %= RCV_BUFSIZE;
	} while (hw->rv_buff_in != hw->rv_buff_out);
}
示例9: sdio_mux_read_data
/*
 * Work handler: read one chunk of muxed data from the SDIO channel into a
 * freshly allocated skb (reserving room for any saved partial packet),
 * then walk the SDIO mux headers in the chunk and dispatch each one.
 * Requeues itself after every read attempt that got past the allocation,
 * so reading continues until sdio_read_avail() reports nothing pending.
 */
static void sdio_mux_read_data(struct work_struct *work)
{
struct sk_buff *skb_mux;
void *ptr = 0;
int sz, rc, len = 0;
struct sdio_mux_hdr *hdr;
DBG("%s: reading\n", __func__);
/* should probably have a separate read lock */
mutex_lock(&sdio_mux_lock);
sz = sdio_read_avail(sdio_mux_ch);
DBG("%s: read avail %d\n", __func__, sz);
if (sz <= 0) {
if (sz)
pr_err("%s: read avail failed %d\n", __func__, sz);
mutex_unlock(&sdio_mux_lock);
return;
}
/* NET_IP_ALIGN is probably not required */
if (sdio_partial_pkt.valid)
len = sdio_partial_pkt.skb->len;
/* if allocation fails attempt to get a smaller chunk of mem */
do {
skb_mux = dev_alloc_skb(sz + NET_IP_ALIGN + len);
if (skb_mux)
break;
pr_err("%s: cannot allocate skb of size:%d\n", __func__,
sz + NET_IP_ALIGN + len);
/* once the request is at or below one page, give up */
if (sz + NET_IP_ALIGN + len <= PAGE_SIZE) {
pr_err("%s: allocation failed\n", __func__);
mutex_unlock(&sdio_mux_lock);
return;
}
sz /= 2;
} while (1);
/* leave headroom for the saved partial packet (and alignment) */
skb_reserve(skb_mux, NET_IP_ALIGN + len);
ptr = skb_put(skb_mux, sz);
/* half second wakelock is fine? */
wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
rc = sdio_read(sdio_mux_ch, ptr, sz);
DBG("%s: read %d\n", __func__, rc);
if (rc) {
pr_err("%s: sdio read failed %d\n", __func__, rc);
dev_kfree_skb_any(skb_mux);
mutex_unlock(&sdio_mux_lock);
queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
return;
}
mutex_unlock(&sdio_mux_lock);
DBG_INC_READ_CNT(sz);
DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
skb_mux->head, skb_mux->data, skb_mux->tail,
skb_mux->end, skb_mux->len);
/* move to a separate function */
/* probably do skb_pull instead of pointer adjustment */
hdr = handle_sdio_partial_pkt(skb_mux);
/* Walk the mux headers packed back-to-back in this chunk */
while ((void *)hdr < (void *)skb_mux->tail) {
if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
/* handle partial header */
sdio_mux_save_partial_pkt(hdr, skb_mux);
break;
}
if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
pr_err("%s: packet error\n", __func__);
break;
}
hdr = handle_sdio_mux_command(hdr, skb_mux);
}
dev_kfree_skb_any(skb_mux);
DBG("%s: read done\n", __func__);
queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
示例10: dhd_init_wlan_mem
static int dhd_init_wlan_mem(void)
{
int i;
int j;
for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
if (!wlan_static_skb[i])
goto err_skb_alloc;
}
for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
if (!wlan_static_skb[i])
goto err_skb_alloc;
}
#if !defined(CONFIG_BCMDHD_PCIE)
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
if (!wlan_static_skb[i])
goto err_skb_alloc;
#endif /* !CONFIG_BCMDHD_PCIE */
for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
if (wlan_mem_array[i].size > 0) {
wlan_mem_array[i].mem_ptr =
kmalloc(wlan_mem_array[i].size, GFP_KERNEL);
if (!wlan_mem_array[i].mem_ptr)
goto err_mem_alloc;
}
}
wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
if (!wlan_static_scan_buf0) {
pr_err("Failed to alloc wlan_static_scan_buf0\n");
goto err_mem_alloc;
}
wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
if (!wlan_static_scan_buf1) {
pr_err("Failed to alloc wlan_static_scan_buf1\n");
goto err_mem_alloc;
}
wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL);
if (!wlan_static_dhd_info_buf) {
pr_err("Failed to alloc wlan_static_dhd_info_buf\n");
goto err_mem_alloc;
}
#ifdef CONFIG_BCMDHD_PCIE
wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE,
GFP_KERNEL);
if (!wlan_static_if_flow_lkup) {
pr_err("Failed to alloc wlan_static_if_flow_lkup\n");
goto err_mem_alloc;
}
#else
wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE,
GFP_KERNEL);
if (!wlan_static_dhd_wlfc_buf) {
pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n");
goto err_mem_alloc;
}
#endif /* CONFIG_BCMDHD_PCIE */
#ifdef CONFIG_BCMDHD_DEBUG_PAGEALLOC
wlan_static_dhd_memdump_buf = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL);
if (!wlan_static_dhd_memdump_buf) {
pr_err("Failed to alloc wlan_static_dhd_memdump_buf\n");
goto err_mem_alloc;
}
wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL);
if (!wlan_static_dhd_memdump_ram) {
pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n");
goto err_mem_alloc;
}
#endif /* CONFIG_BCMDHD_DEBUG_PAGEALLOC */
pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__);
return 0;
err_mem_alloc:
#ifdef CONFIG_BCMDHD_DEBUG_PAGEALLOC
if (wlan_static_dhd_memdump_ram)
kfree(wlan_static_dhd_memdump_ram);
if (wlan_static_dhd_memdump_buf)
kfree(wlan_static_dhd_memdump_buf);
#endif /* CONFIG_BCMDHD_DEBUG_PAGEALLOC */
#ifdef CONFIG_BCMDHD_PCIE
if (wlan_static_if_flow_lkup)
kfree(wlan_static_if_flow_lkup);
#else
if (wlan_static_dhd_wlfc_buf)
kfree(wlan_static_dhd_wlfc_buf);
#endif /* CONFIG_BCMDHD_PCIE */
//.........这里部分代码省略.........
示例11: rtl8192eu_hostap_mgnt_xmit_entry
s32 rtl8192eu_hostap_mgnt_xmit_entry(_adapter *padapter, _pkt *pkt)
{
#if 0
//#ifdef PLATFORM_LINUX
u16 fc;
int rc, len, pipe;
unsigned int bmcst, tid, qsel;
struct sk_buff *skb, *pxmit_skb;
struct urb *urb;
unsigned char *pxmitbuf;
struct tx_desc *ptxdesc;
struct rtw_ieee80211_hdr *tx_hdr;
struct hostapd_priv *phostapdpriv = padapter->phostapdpriv;
struct net_device *pnetdev = padapter->pnetdev;
HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter);
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
//DBG_8192C("%s\n", __FUNCTION__);
skb = pkt;
len = skb->len;
tx_hdr = (struct rtw_ieee80211_hdr *)(skb->data);
fc = le16_to_cpu(tx_hdr->frame_ctl);
bmcst = IS_MCAST(tx_hdr->addr1);
if ((fc & RTW_IEEE80211_FCTL_FTYPE) != RTW_IEEE80211_FTYPE_MGMT)
goto _exit;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
pxmit_skb = dev_alloc_skb(len + TXDESC_SIZE);
#else
pxmit_skb = netdev_alloc_skb(pnetdev, len + TXDESC_SIZE);
#endif
if(!pxmit_skb)
goto _exit;
pxmitbuf = pxmit_skb->data;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
goto _exit;
}
// ----- fill tx desc -----
ptxdesc = (struct tx_desc *)pxmitbuf;
_rtw_memset(ptxdesc, 0, sizeof(*ptxdesc));
//offset 0
ptxdesc->txdw0 |= cpu_to_le32(len&0x0000ffff);
ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);//default = 32 bytes for TX Desc
ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
if(bmcst)
{
ptxdesc->txdw0 |= cpu_to_le32(BIT(24));
}
//offset 4
ptxdesc->txdw1 |= cpu_to_le32(0x00);//MAC_ID
ptxdesc->txdw1 |= cpu_to_le32((0x12<<QSEL_SHT)&0x00001f00);
ptxdesc->txdw1 |= cpu_to_le32((0x06<< 16) & 0x000f0000);//b mode
//offset 8
//offset 12
ptxdesc->txdw3 |= cpu_to_le32((le16_to_cpu(tx_hdr->seq_ctl)<<16)&0xffff0000);
//offset 16
ptxdesc->txdw4 |= cpu_to_le32(BIT(8));//driver uses rate
//offset 20
//HW append seq
ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); // Hw set sequence number
ptxdesc->txdw3 |= cpu_to_le32((8 <<28)); //set bit3 to 1. Suugested by TimChen. 2009.12.29.
rtl8192e_cal_txdesc_chksum(ptxdesc);
// ----- end of fill tx desc -----
//
skb_put(pxmit_skb, len + TXDESC_SIZE);
pxmitbuf = pxmitbuf + TXDESC_SIZE;
_rtw_memcpy(pxmitbuf, skb->data, len);
//DBG_8192C("mgnt_xmit, len=%x\n", pxmit_skb->len);
// ----- prepare urb for submit -----
//translate DMA FIFO addr to pipehandle
//pipe = ffaddr2pipehdl(pdvobj, MGT_QUEUE_INX);
pipe = usb_sndbulkpipe(pdvobj->pusbdev, pHalData->Queue2EPNum[(u8)MGT_QUEUE_INX]&0x0f);
//.........这里部分代码省略.........
示例12: z8530_rx_done
static void z8530_rx_done(struct z8530_channel *c)
{
struct sk_buff *skb;
int ct;
/*
* Is our receive engine in DMA mode
*/
if(c->rxdma_on)
{
/*
* Save the ready state and the buffer currently
* being used as the DMA target
*/
int ready=c->dma_ready;
unsigned char *rxb=c->rx_buf[c->dma_num];
unsigned long flags;
/*
* Complete this DMA. Neccessary to find the length
*/
flags=claim_dma_lock();
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
c->rxdma_on=0;
ct=c->mtu-get_dma_residue(c->rxdma);
if(ct<0)
ct=2; /* Shit happens.. */
c->dma_ready=0;
/*
* Normal case: the other slot is free, start the next DMA
* into it immediately.
*/
if(ready)
{
c->dma_num^=1;
set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
set_dma_count(c->rxdma, c->mtu);
c->rxdma_on = 1;
enable_dma(c->rxdma);
/* Stop any frames that we missed the head of
from passing */
write_zsreg(c, R0, RES_Rx_CRC);
}
else
/* Can't occur as we dont reenable the DMA irq until
after the flip is done */
printk(KERN_WARNING "%s: DMA flip overrun!\n",
c->netdevice->name);
release_dma_lock(flags);
/*
* Shove the old buffer into an sk_buff. We can't DMA
* directly into one on a PC - it might be above the 16Mb
* boundary. Optimisation - we could check to see if we
* can avoid the copy. Optimisation 2 - make the memcpy
* a copychecksum.
*/
skb = dev_alloc_skb(ct);
if (skb == NULL) {
c->netdevice->stats.rx_dropped++;
printk(KERN_WARNING "%s: Memory squeeze.\n",
c->netdevice->name);
} else {
skb_put(skb, ct);
skb_copy_to_linear_data(skb, rxb, ct);
c->netdevice->stats.rx_packets++;
c->netdevice->stats.rx_bytes += ct;
}
c->dma_ready = 1;
} else {
RT_LOCK;
skb = c->skb;
/*
* The game we play for non DMA is similar. We want to
* get the controller set up for the next packet as fast
* as possible. We potentially only have one byte + the
* fifo length for this. Thus we want to flip to the new
* buffer and then mess around copying and allocating
* things. For the current case it doesn't matter but
* if you build a system where the sync irq isnt blocked
* by the kernel IRQ disable then you need only block the
* sync IRQ for the RT_LOCK area.
*
*/
ct=c->count;
c->skb = c->skb2;
c->count = 0;
c->max = c->mtu;
//.........这里部分代码省略.........
示例13: vnt_alloc_bufs
/*
 * Allocate all USB transfer resources for the device: one send context
 * plus URB per TX slot, one receive control block with URB and skb per
 * RX slot (each RX URB is submitted immediately), and finally the
 * interrupt URB with its data buffer.
 *
 * Returns true on success.  On any failure everything allocated so far
 * is released via vnt_free_rx_bufs()/vnt_free_tx_bufs() and false is
 * returned.
 */
static bool vnt_alloc_bufs(struct vnt_private *priv)
{
	struct vnt_usb_send_context *ctx;
	struct vnt_rcb *rx_cb;
	int i;

	/* TX side: send contexts and their URBs */
	for (i = 0; i < priv->num_tx_context; i++) {
		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			goto free_tx;

		priv->tx_context[i] = ctx;
		ctx->priv = priv;
		ctx->pkt_no = i;

		ctx->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!ctx->urb)
			goto free_tx;

		ctx->in_use = false;
	}

	/* RX side: control blocks, URBs and skbs; submit each at once */
	for (i = 0; i < priv->num_rcb; i++) {
		priv->rcb[i] = kzalloc(sizeof(*rx_cb), GFP_KERNEL);
		if (!priv->rcb[i]) {
			dev_err(&priv->usb->dev,
				"failed to allocate rcb no %d\n", i);
			goto free_rx_tx;
		}

		rx_cb = priv->rcb[i];
		rx_cb->priv = priv;

		rx_cb->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_cb->urb)
			goto free_rx_tx;

		rx_cb->skb = dev_alloc_skb(priv->rx_buf_sz);
		if (!rx_cb->skb)
			goto free_rx_tx;

		rx_cb->in_use = false;

		if (vnt_submit_rx_urb(priv, rx_cb))
			goto free_rx_tx;
	}

	priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!priv->interrupt_urb)
		goto free_rx_tx;

	priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
	if (!priv->int_buf.data_buf) {
		usb_free_urb(priv->interrupt_urb);
		goto free_rx_tx;
	}

	return true;

free_rx_tx:
	vnt_free_rx_bufs(priv);
free_tx:
	vnt_free_tx_bufs(priv);
	return false;
}
示例14: seeq8005_rx
/* We have a good packet(s), get it/them out of the buffers. */
static void seeq8005_rx(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
int boguscount = 10;
int pkt_hdr;
int ioaddr = dev->base_addr;
do {
int next_packet;
int pkt_len;
int i;
int status;
status = inw(SEEQ_STATUS);
outw( lp->receive_ptr, SEEQ_DMAAR);
outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
wait_for_buffer(dev);
next_packet = ntohs(inw(SEEQ_BUFFER));
pkt_hdr = inw(SEEQ_BUFFER);
if (net_debug>2) {
printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
}
if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
return; /* Done for now */
}
if ((pkt_hdr & SEEQPKTS_DONE)==0)
break;
if (next_packet < lp->receive_ptr) {
pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
} else {
pkt_len = next_packet - lp->receive_ptr - 4;
}
if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
seeq8005_init(dev,1);
return;
}
lp->receive_ptr = next_packet;
if (net_debug>2) {
printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
}
if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
dev->stats.rx_errors++;
if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++;
if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++;
if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++;
if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++;
/* skip over this packet */
outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
} else {
/* Malloc up new buffer. */
struct sk_buff *skb;
unsigned char *buf;
skb = dev_alloc_skb(pkt_len);
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
break;
}
skb_reserve(skb, 2); /* align data on 16 byte */
buf = skb_put(skb,pkt_len);
insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
if (net_debug>2) {
char * p = buf;
printk("%s: recv ",dev->name);
for(i=0;i<14;i++) {
printk("%02x ",*(p++)&0xff);
}
printk("\n");
}
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
} while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
示例15: _rtl_usb_rx_process_agg
//.........这里部分代码省略.........
struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err = 0;
if (unlikely(IS_USB_STOP(rtlusb)))
goto free;
if (likely(0 == _urb->status)) {
unsigned int padding;
struct sk_buff *skb;
unsigned int qlen;
unsigned int size = _urb->actual_length;
struct ieee80211_hdr *hdr;
if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
"Too short packet from bulk IN! (len: %d)\n",
size);
goto resubmit;
}
qlen = skb_queue_len(&rtlusb->rx_queue);
if (qlen >= __RX_SKB_MAX_QUEUED) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
"Pending RX skbuff queue full! (qlen: %d)\n",
qlen);
goto resubmit;
}
hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);
skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
if (!skb) {
RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
"Can't allocate skb for bulk IN!\n");
goto resubmit;
}
_rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
/* Make sure the payload data is 4 byte aligned. */
skb_reserve(skb, padding);
/* reserve some space for mac80211's radiotap */
skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
memcpy(skb_put(skb, size), _urb->transfer_buffer, size);
skb_queue_tail(&rtlusb->rx_queue, skb);
tasklet_schedule(&rtlusb->rx_work_tasklet);
goto resubmit;
}
switch (_urb->status) {
/* disconnect */
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
goto free;
default:
break;
}