This article collects typical usage examples of the skb_padto function in C++. If you are unsure what exactly skb_padto does or how to call it, the hand-picked code examples below may help.
The following presents 15 code examples of skb_padto, sorted by popularity by default.
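Before the individual examples, here is a minimal sketch of the convention most of the drivers below rely on. It assumes the current kernel API, where skb_padto() returns 0 on success and frees the skb itself on failure; the function and device names are hypothetical.
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Pad short frames up to the minimum Ethernet frame size (60 bytes). */
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb_padto() has already freed the skb */

        /*
         * Note that skb_padto() does not update skb->len, so drivers that need
         * the on-wire length afterwards compute it themselves, typically as
         * max_t(unsigned int, ETH_ZLEN, skb->len).
         */

        /* ... map the buffer and hand it to the hardware here ... */
        return NETDEV_TX_OK;
}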
Example 1: net_send_packet
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *lp = dev->priv;
int ioaddr = dev->base_addr;
short length = skb->len;
unsigned char *buf;
unsigned long flags;
/* Block a transmit from overlapping. */
if (length > ETH_FRAME_LEN) {
if (net_debug)
printk("%s: Attempting to send a large packet (%d bytes).\n",
dev->name, length);
return 1;
}
if (length < ETH_ZLEN)
{
skb = skb_padto(skb, ETH_ZLEN);
if(skb == NULL)
return 0;
length = ETH_ZLEN;
}
buf = skb->data;
if (net_debug > 4)
printk("%s: Transmitting a packet of length %lu.\n", dev->name,
(unsigned long)skb->len);
/* We must not start transmitting until the packet has been completely
   transferred into the Tx queue. While running the code below we may
   take a Tx interrupt, so clear tx_queue_ready to keep the interrupt
   routine (net_interrupt) from starting a transmission. */
spin_lock_irqsave(&lp->lock, flags);
lp->tx_queue_ready = 0;
{
outw(length, ioaddr + DATAPORT);
outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
lp->tx_queue++;
lp->tx_queue_len += length + 2;
}
lp->tx_queue_ready = 1;
spin_unlock_irqrestore(&lp->lock, flags);
if (lp->tx_started == 0) {
/* If the Tx is idle, always trigger a transmit. */
outb(0x80 | lp->tx_queue, ioaddr + TX_START);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
dev->trans_start = jiffies;
lp->tx_started = 1;
} else if (lp->tx_queue_len < 4096 - 1502)
/* Yes, there is room for one more packet. */
;
else
netif_stop_queue(dev);
dev_kfree_skb(skb);
return 0;
}
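Example 1 above (like examples 2, 9, 12 and 14 below) is written against the older skb_padto() signature, which returned the padded sk_buff rather than an int. A hedged sketch of that legacy convention follows; the helper name legacy_xmit_prepare is hypothetical.
/*
 * Legacy convention (roughly pre-2.6.18 kernels): skb_padto() returned the
 * padded buffer, which could be a different sk_buff than the one passed in,
 * or NULL if allocation failed; in the failure case the original skb had
 * already been freed.
 */
static struct sk_buff *legacy_xmit_prepare(struct sk_buff *skb)
{
        if (skb->len < ETH_ZLEN) {
                skb = skb_padto(skb, ETH_ZLEN);
                if (skb == NULL)
                        return NULL;    /* already freed; caller reports success */
        }
        /* The caller must re-read skb->data, since the buffer may have moved. */
        return skb;
}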
Example 2: seeq8005_send_packet
static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
short length = skb->len;
unsigned char *buf;
if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN);
if (skb == NULL)
return 0;
length = ETH_ZLEN;
}
buf = skb->data;
/* Block a timer-based transmit from overlapping */
netif_stop_queue(dev);
hardware_send_packet(dev, buf, length);
dev->trans_start = jiffies;
lp->stats.tx_bytes += length;
dev_kfree_skb (skb);
/* You might need to clean up and record Tx statistics here. */
return 0;
}
Example 3: cpsw_ndo_start_xmit
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
ndev->trans_start = jiffies;
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
ret = cpdma_chan_submit(priv->txch, skb, skb->data,
skb->len, GFP_KERNEL);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
}
return NETDEV_TX_OK;
fail:
priv->stats.tx_dropped++;
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
Example 4: addTag
/**********************************************************************
*%FUNCTION: addTag
*%ARGUMENTS:
* m -- a PPPoE packet
* tag -- tag to add
*%RETURNS:
* -1 if there is no room in the packet; 0 on success.
*%DESCRIPTION:
* Appends a tag to the end of a PPPoE packet.
***********************************************************************/
int
addTag(struct sk_buff **m, PPPoETag const *tag)
{
int len = ntohs(tag->length) + TAG_HDR_SIZE;
PPPoEPacket *packet = MTOD(*m, PPPoEPacket *);
int new_size, offset;
struct sk_buff *skb;
if (len + ntohs(packet->length) > MAX_PPPOE_PAYLOAD)
return -1;
offset = HDR_SIZE + ntohs(packet->length);
new_size = len + offset;
skb = skb_padto(*m, new_size);
if (!skb)
return -1;
if (new_size > skb->len)
skb_put(skb, new_size - skb->len);
memcpy(skb->data + offset, (u8 *)tag, len);
*m = skb;
/* In case buf was realloced... */
packet = MTOD(*m, PPPoEPacket *);
packet->length = htons(ntohs(packet->length) + len);
return 0;
}
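addTag() also shows why several of these drivers pair skb_padto() with skb_put(): skb_padto() only guarantees that zero-filled bytes exist up to new_size, it does not advance skb->len, so the tail has to be claimed explicitly before the tag is copied in. Below is a hedged usage sketch for addTag(); the helper name, TAG_HOST_UNIQ and the PPPoETag layout follow rp-pppoe conventions and are assumptions, not part of the excerpt.
/* Hypothetical caller: append a Host-Uniq tag to a PPPoE discovery packet. */
static int add_host_uniq(struct sk_buff **skb, u32 cookie)
{
        PPPoETag tag;

        tag.type = htons(TAG_HOST_UNIQ);
        tag.length = htons(sizeof(cookie));
        memcpy(tag.payload, &cookie, sizeof(cookie));

        return addTag(skb, &tag);       /* 0 on success, -1 if the tag does not fit */
}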
Example 5: ramips_eth_hard_start_xmit
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct raeth_priv *re = netdev_priv(dev);
struct raeth_tx_info *txi, *txi_next;
struct ramips_tx_dma *txd, *txd_next;
unsigned long tx;
unsigned int tx_next;
dma_addr_t mapped_addr;
if (re->plat->min_pkt_len) {
if (skb->len < re->plat->min_pkt_len) {
if (skb_padto(skb, re->plat->min_pkt_len)) {
printk(KERN_ERR
"ramips_eth: skb_padto failed\n");
/* skb_padto() already freed the skb on failure */
return NETDEV_TX_OK;
}
skb_put(skb, re->plat->min_pkt_len - skb->len);
}
}
dev->trans_start = jiffies;
mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
spin_lock(&re->page_lock);
tx = ramips_fe_trr(RAETH_REG_TX_CTX_IDX0);
tx_next = (tx + 1) % NUM_TX_DESC;
txi = &re->tx_info[tx];
txd = txi->tx_desc;
txi_next = &re->tx_info[tx_next];
txd_next = txi_next->tx_desc;
if ((txi->tx_skb) || (txi_next->tx_skb) ||
!(txd->txd2 & TX_DMA_DONE) ||
!(txd_next->txd2 & TX_DMA_DONE))
goto out;
txi->tx_skb = skb;
txd->txd1 = (unsigned int) mapped_addr;
wmb();
txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
ramips_fe_twr(tx_next, RAETH_REG_TX_CTX_IDX0);
netdev_sent_queue(dev, skb->len);
spin_unlock(&re->page_lock);
return NETDEV_TX_OK;
out:
spin_unlock(&re->page_lock);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
Example 6: arc_emac_tx
/**
* arc_emac_tx - Starts the data transmission.
* @skb: sk_buff pointer that contains data to be Transmitted.
* @ndev: Pointer to net_device structure.
*
* returns: NETDEV_TX_OK, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free.
*
* This function is invoked from upper layers to initiate transmission.
*/
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
unsigned int len, *txbd_curr = &priv->txbd_curr;
struct net_device_stats *stats = &priv->stats;
__le32 *info = &priv->txbd[*txbd_curr].info;
dma_addr_t addr;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
len = max_t(unsigned int, ETH_ZLEN, skb->len);
/* EMAC still holds this buffer in its possession.
* CPU must not modify this buffer descriptor
*/
if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
stats->tx_dropped++;
stats->tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
priv->tx_buff[*txbd_curr].skb = skb;
priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
/* Make sure pointer to data buffer is set */
wmb();
skb_tx_timestamp(skb);
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
/* Increment index to point to the next BD */
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
/* Get "info" of the next BD */
info = &priv->txbd[*txbd_curr].info;
/* Check if Tx BD ring is full - next BD is still owned by EMAC */
if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
netif_stop_queue(ndev);
arc_reg_set(priv, R_STATUS, TXPL_MASK);
return NETDEV_TX_OK;
}
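arc_emac_tx() highlights a related detail: after a successful skb_padto(), skb->len still holds the unpadded size, so the driver derives the on-wire length with max_t() and uses that for both dma_map_single() and the descriptor. A minimal, hypothetical helper capturing that length handling:
/* Hedged sketch: DMA-map the padded length rather than skb->len. */
static dma_addr_t map_padded_frame(struct net_device *ndev, struct sk_buff *skb)
{
        unsigned int wire_len = max_t(unsigned int, ETH_ZLEN, skb->len);

        return dma_map_single(&ndev->dev, skb->data, wire_len, DMA_TO_DEVICE);
}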
Example 7: cpmac_start_xmit
static int cpmac_start_xmit(struct sk_buff *skb)
{
int queue, len, ret;
lock_s(l1);
//struct cpmac_desc *desc;
//struct cpmac_priv *priv = netdev_priv(dev);
//if (unlikely(atomic_read(reset_pending)))
// return NETDEV_TX_BUSY;
//cpmac_write(CPMAC_TX_PTR(queue), (u32)desc_ring[queue].mapping);
// BUG: move this line to the *** location below
notify(cond_irq_can_happen);
if (unlikely(skb_padto(skb, ETH_ZLEN))) {
ret = NETDEV_TX_OK;
} else {
len = max(skb->len, ETH_ZLEN);
//queue = skb_get_queue_mapping(skb);
netif_stop_subqueue(/*queue*/);
//desc = &desc_ring[queue];
if (unlikely(desc_ring[queue].dataflags & CPMAC_OWN)) {
// if (netif_msg_tx_err(priv) && net_ratelimit())
// netdev_warn(dev, "tx dma ring full\n");
ret = NETDEV_TX_BUSY;
} else {
spin_lock(cplock);
spin_unlock(cplock);
desc_ring[queue].dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
desc_ring[queue].skb = skb;
desc_ring[queue].data_mapping = dma_map_single(skb->data, len,
DMA_TO_DEVICE);
desc_ring[queue].hw_data = (u32)desc_ring[queue].data_mapping;
desc_ring[queue].datalen = len;
desc_ring[queue].buflen = len;
// if (unlikely(netif_msg_tx_queued(priv)))
// netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
// if (unlikely(netif_msg_hw(priv)))
// cpmac_dump_desc(dev, &desc_ring[queue]);
// if (unlikely(netif_msg_pktdata(priv)))
// cpmac_dump_skb(dev, skb);
ret = NETDEV_TX_OK;
}
}
// ***
unlock_s(l1);
return ret;
}
Example 8: epic_start_xmit
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct epic_private *ep = dev->priv;
int entry, free_count;
u32 ctrl_word;
unsigned long flags;
if (skb_padto(skb, ETH_ZLEN))
return 0;
/* Caution: the write order is important here, set the field with the
"ownership" bit last. */
/* Calculate the next Tx descriptor entry. */
spin_lock_irqsave(&ep->lock, flags);
free_count = ep->cur_tx - ep->dirty_tx;
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
} else if (free_count == TX_QUEUE_LEN/2) {
ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
} else if (free_count < TX_QUEUE_LEN - 1) {
ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
} else {
/* Leave room for an additional entry. */
ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
ep->tx_full = 1;
}
ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
ep->tx_ring[entry].txstatus =
((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
| cpu_to_le32(DescOwn);
ep->cur_tx++;
if (ep->tx_full)
netif_stop_queue(dev);
spin_unlock_irqrestore(&ep->lock, flags);
/* Trigger an immediate transmit demand. */
outl(TxQueued, dev->base_addr + COMMAND);
dev->trans_start = jiffies;
if (debug > 4)
printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
"flag %2.2x Tx status %8.8x.\n",
dev->name, (int)skb->len, entry, ctrl_word,
(int)inl(dev->base_addr + TxSTAT));
return 0;
}
Example 9: rtl8169_start_xmit
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct rtl8169_private *tp = dev->priv;
void *ioaddr = tp->mmio_addr;
int entry = tp->cur_tx % NUM_TX_DESC;
u32 len = skb->len;
if (unlikely(skb->len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
goto err_update_stats;
len = ETH_ZLEN;
}
spin_lock_irq(&tp->lock);
if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
dma_addr_t mapping;
mapping = pci_map_single(tp->pci_dev, skb->data, len,
PCI_DMA_TODEVICE);
tp->Tx_skbuff[entry] = skb;
tp->TxDescArray[entry].addr = cpu_to_le64(mapping);
tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));
RTL_W8(TxPoll, 0x40); //set polling bit
dev->trans_start = jiffies;
tp->cur_tx++;
} else
goto err_drop;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
netif_stop_queue(dev);
}
out:
spin_unlock_irq(&tp->lock);
return 0;
err_drop:
dev_kfree_skb(skb);
err_update_stats:
tp->stats.tx_dropped++;
goto out;
}
Example 10: ramips_eth_hard_start_xmit
static int
ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
{
struct raeth_priv *priv = netdev_priv(dev);
unsigned long tx;
unsigned int tx_next;
unsigned int mapped_addr;
unsigned long flags;
if(priv->plat->min_pkt_len)
{
if(skb->len < priv->plat->min_pkt_len)
{
if(skb_padto(skb, priv->plat->min_pkt_len))
{
printk(KERN_ERR "ramips_eth: skb_padto failed\n");
/* skb_padto() already freed the skb on failure */
return NETDEV_TX_OK;
}
skb_put(skb, priv->plat->min_pkt_len - skb->len);
}
}
dev->trans_start = jiffies;
mapped_addr = (unsigned int)dma_map_single(NULL, skb->data, skb->len,
DMA_TO_DEVICE);
dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
spin_lock_irqsave(&priv->page_lock, flags);
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
if(tx == NUM_TX_DESC - 1)
tx_next = 0;
else
tx_next = tx + 1;
if((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
!(priv->tx[tx].txd2 & TX_DMA_DONE) || !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
goto out;
priv->tx[tx].txd1 = mapped_addr;
priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
priv->tx_skb[tx] = skb;
wmb();
ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
spin_unlock_irqrestore(&priv->page_lock, flags);
return NETDEV_TX_OK;
out:
spin_unlock_irqrestore(&priv->page_lock, flags);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
Example 11: cpmac_start_xmit
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int queue, len;
struct cpmac_desc *desc;
struct cpmac_priv *priv = netdev_priv(dev);
if (unlikely(atomic_read(&priv->reset_pending)))
return NETDEV_TX_BUSY;
if (unlikely(skb_padto(skb, ETH_ZLEN)))
return NETDEV_TX_OK;
len = max(skb->len, ETH_ZLEN);
queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
netif_stop_subqueue(dev, queue);
#else
netif_stop_queue(dev);
#endif
desc = &priv->desc_ring[queue];
if (unlikely(desc->dataflags & CPMAC_OWN)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: tx dma ring full\n",
dev->name);
return NETDEV_TX_BUSY;
}
spin_lock(&priv->lock);
dev->trans_start = jiffies;
spin_unlock(&priv->lock);
desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
desc->skb = skb;
desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
DMA_TO_DEVICE);
desc->hw_data = (u32)desc->data_mapping;
desc->datalen = len;
desc->buflen = len;
if (unlikely(netif_msg_tx_queued(priv)))
printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
skb->len);
if (unlikely(netif_msg_hw(priv)))
cpmac_dump_desc(dev, desc);
if (unlikely(netif_msg_pktdata(priv)))
cpmac_dump_skb(dev, skb);
cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
return NETDEV_TX_OK;
}
Example 12: mc32_send_packet
static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct mc32_local *lp = netdev_priv(dev);
u32 head = atomic_read(&lp->tx_ring_head);
volatile struct skb_header *p, *np;
netif_stop_queue(dev);
if(atomic_read(&lp->tx_count)==0) {
return 1;
}
skb = skb_padto(skb, ETH_ZLEN);
if (skb == NULL) {
netif_wake_queue(dev);
return 0;
}
atomic_dec(&lp->tx_count);
/* P is the last sending/sent buffer as a pointer */
p=lp->tx_ring[head].p;
head = next_tx(head);
/* NP is the buffer we will be loading */
np=lp->tx_ring[head].p;
/* We will need this to flush the buffer out */
lp->tx_ring[head].skb=skb;
np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
np->data = isa_virt_to_bus(skb->data);
np->status = 0;
np->control = CONTROL_EOP | CONTROL_EOL;
wmb();
/*
* The new frame has been setup; we can now
* let the interrupt handler and card "see" it
*/
atomic_set(&lp->tx_ring_head, head);
p->control &= ~CONTROL_EOL;
netif_wake_queue(dev);
return 0;
}
Example 13: eepro_send_packet
static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct eepro_local *lp = netdev_priv(dev);
unsigned long flags;
int ioaddr = dev->base_addr;
short length = skb->len;
if (net_debug > 5)
printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
if (length < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
length = ETH_ZLEN;
}
netif_stop_queue (dev);
eepro_dis_int(ioaddr);
spin_lock_irqsave(&lp->lock, flags);
{
unsigned char *buf = skb->data;
if (hardware_send_packet(dev, buf, length))
/* we won't wake queue here because we're out of space */
dev->stats.tx_dropped++;
else {
dev->stats.tx_bytes+=skb->len;
netif_wake_queue(dev);
}
}
dev_kfree_skb (skb);
/* You might need to clean up and record Tx statistics here. */
/* dev->stats.tx_aborted_errors++; */
if (net_debug > 5)
printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);
eepro_en_int(ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
Example 14: znet_send_packet
static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int ioaddr = dev->base_addr;
struct znet_private *znet = dev->priv;
unsigned long flags;
short length = skb->len;
if (znet_debug > 4)
printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN);
if (skb == NULL)
return 0;
length = ETH_ZLEN;
}
netif_stop_queue (dev);
/* Check that the part hasn't reset itself, probably from suspend. */
outb(CR0_STATUS_0, ioaddr);
if (inw(ioaddr) == 0x0010 &&
inw(ioaddr) == 0x0000 &&
inw(ioaddr) == 0x0010) {
if (znet_debug > 1)
printk (KERN_WARNING "%s : waking up\n", dev->name);
hardware_init(dev);
znet_transceiver_power (dev, 1);
}
if (1) {
unsigned char *buf = (void *)skb->data;
ushort *tx_link = znet->tx_cur - 1;
ushort rnd_len = (length + 1)>>1;
znet->stats.tx_bytes+=length;
if (znet->tx_cur >= znet->tx_end)
znet->tx_cur = znet->tx_start;
*znet->tx_cur++ = length;
if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
memcpy(znet->tx_cur, buf, semi_cnt);
rnd_len -= semi_cnt>>1;
memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
znet->tx_cur = znet->tx_start + rnd_len;
} else {
Example 15: rt2x00usb_kick_tx_entry
static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
u32 length;
int status;
if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
return false;
/*
* USB devices require certain padding at the end of each frame
* and urb. Those paddings are not included in skbs. Pass entry
* to the driver to determine what the overall length should be.
*/
length = rt2x00dev->ops->lib->get_tx_data_len(entry);
status = skb_padto(entry->skb, length);
if (unlikely(status)) {
/* TODO: report something more appropriate than IO_FAILED. */
rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
return false;
}
usb_fill_bulk_urb(entry_priv->urb, usb_dev,
usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
}
return false;
}
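Example 15 passes a driver-computed length to skb_padto() instead of ETH_ZLEN, because USB devices expect frame and URB padding that is not present in the skb. A hedged sketch of that idea, assuming a hypothetical 4-byte alignment requirement:
/* Round the frame length up to the device's alignment, then let skb_padto()
 * create and zero the extra bytes before the URB is submitted. */
static int usb_tx_pad(struct sk_buff *skb)
{
        unsigned int length = ALIGN(skb->len, 4);

        return skb_padto(skb, length);  /* 0 on success; skb freed on failure */
}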