This article collects typical usage examples of the dev_kfree_skb function in C++. If you have been struggling with questions such as: what exactly does C++ dev_kfree_skb do? how is C++ dev_kfree_skb called? what do real uses of C++ dev_kfree_skb look like? then the hand-picked code examples below should help.
The following shows 15 code examples of the dev_kfree_skb function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
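Before diving into the examples, a quick orientation: dev_kfree_skb() releases an sk_buff that the driver owns and will not pass on to the network stack, typically once the frame data has been handed to the hardware or the packet has to be dropped. The sketch below shows the most common pattern in an ndo_start_xmit handler; it is a minimal illustration, not code from any of the drivers quoted here, and the my_hw_queue_frame() helper is hypothetical:
/* Minimal sketch of dev_kfree_skb() in a TX handler.
 * my_hw_queue_frame() is a hypothetical helper standing in for
 * "hand the frame data to the hardware". */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_hw_queue_frame(dev, skb->data, skb->len) < 0) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);	/* we own the skb, so free it even on error */
		return NETDEV_TX_OK;	/* consumed (dropped); do not requeue */
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);		/* the data now lives in the hardware */
	return NETDEV_TX_OK;
}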
Example 1: net_interrupt
/*
* The typical workload of the driver:
* Handle the network interface interrupts.
*/
static void
net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
struct net_device *dev;
struct net_local *lp;
int status = 0;
BUGMSG("net_interrupt..");
if (dev_id == NULL) {
printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
return;
}
dev = (struct net_device *) dev_id;
lp = (struct net_local *) dev->priv;
status = lp->uEth->INTST;
/* RX Start: No frame available during frame RX */
if(status & ETHINTSTRXSTART) {
printk("PANIC: Ethernet RX Queue is empty: this should NEVER Occur!!!\n");
printk("rxq_ptr = %d\n", lp->rxq_ptr);
}
/* RX End: a frame is received successfully */
if(status & ETHINTSTRXEND) {
BUGMSG("RX end..");
do {
unsigned int rx_packet_stat = lp->uEth->RXSTAT;
if(rx_packet_stat & ETHRXSTATERROR) {
printk("RX Packet error 0x%.8x\n", rx_packet_stat);
lp->stats.rx_dropped++;
/* We reuse the frame */
lp->uEth->RXADDR = (int)lp->rx_frames[lp->rxq_ptr & (ETH_RXQ_SIZE -1)];
} else {
unsigned int length = (rx_packet_stat & 0x3fff);
struct sk_buff *skb;
struct sk_buff *newskb;
BUGMSG("rxq %d, skblen %d..", lp->rxq_ptr, length);
/* The received skb */
skb = lp->rx_frames[lp->rxq_ptr & (ETH_RXQ_SIZE -1)];
skb_put(skb, length);
lp->stats.rx_packets++;
lp->stats.rx_bytes += length;
/* Alloc new skb for rx_frame ringbuffer */
newskb = dev_alloc_skb( PKT_BUF_SZ );
if (newskb == NULL) {
/* We assume that we can consume and produce the RX ring
* buffer at the same time. In this case, we cannot
* produce so we will eventually crash ...
*/
printk("Cannot allocate skb. This is very bad.... We will CRASH!\n");
} else {
/* finish the received frame for the stack */
skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
newskb->dev = dev;
/* word align IP header */
skb_reserve( newskb, 2 );
lp->rx_frames[lp->rxq_ptr & (ETH_RXQ_SIZE -1)] = newskb;
/* Put in Ethernet RX queue */
lp->uEth->RXADDR = (int) newskb->data;
}
lp->rxq_ptr++;
netif_rx(skb);
}
} while(lp->uEth->INTST & ETHINTSTRXEND);
}
if(status & ETHINTSTTXEND) {
BUGMSG("TX end..");
do {
unsigned int tx_packet_stat = lp->uEth->TXSTAT;
struct sk_buff *skb;
BUGMSG("*%d %d", lp->txq_p, lp->txq_c);
if(tx_packet_stat)
printk("TX Packet error 0x%.8x\n", tx_packet_stat);
skb = lp->tx_frames[lp->txq_c & (ETH_TXQ_SIZE -1)];
/* paranoia check should be removed */
if(skb == NULL) {
printk("ERROR: tx frame NULL\n");
break;
}
lp->tx_frames[lp->txq_c & (ETH_TXQ_SIZE -1)] = NULL;
lp->txq_c++;
lp->stats.tx_packets++;
dev_kfree_skb(skb);
} while(lp->uEth->INTST & ETHINTSTTXEND);
}
BUGMSG("\n");
return;
}
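A note on context for example 1: it calls dev_kfree_skb() from a hard interrupt handler, as many drivers of that era did. On current kernels this is not allowed; freeing from hardirq context (or with interrupts disabled) should go through dev_kfree_skb_irq(), which defers the actual free, or dev_kfree_skb_any(), which picks the right variant at run time. A sketch of example 1's TX-completion free written that way (field names taken from the example):
/* Sketch only: the TX-completion free from example 1, made safe for
 * hardirq context on modern kernels. */
skb = lp->tx_frames[lp->txq_c & (ETH_TXQ_SIZE - 1)];
if (skb) {
	lp->tx_frames[lp->txq_c & (ETH_TXQ_SIZE - 1)] = NULL;
	lp->txq_c++;
	lp->stats.tx_packets++;
	dev_kfree_skb_irq(skb);	/* defer the free out of hardirq context */
}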
Example 2: cfv_netdev_tx
/* Put the CAIF packet on the virtio ring and kick the receiver */
static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct cfv_info *cfv = netdev_priv(netdev);
struct buf_info *buf_info;
struct scatterlist sg;
unsigned long flags;
bool flow_off = false;
int ret;
/* garbage collect released buffers */
cfv_release_used_buf(cfv->vq_tx);
spin_lock_irqsave(&cfv->tx_lock, flags);
/* The flow-off check takes the number of CPUs into account to make
 * sure the virtqueue cannot be overfilled under any possible SMP
 * condition.
 *
 * Flow-on is triggered again once sufficient buffers have been freed.
 */
if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
flow_off = true;
cfv->stats.tx_full_ring++;
}
/* If we run out of memory, we release the memory reserve and retry
* allocation.
*/
buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
if (unlikely(!buf_info)) {
cfv->stats.tx_no_mem++;
flow_off = true;
if (cfv->reserved_mem && cfv->genpool) {
gen_pool_free(cfv->genpool, cfv->reserved_mem,
cfv->reserved_size);
cfv->reserved_mem = 0;
buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
}
}
if (unlikely(flow_off)) {
/* Turn flow on when 1/4 of the descriptors are released */
cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
/* Enable notifications of recycled TX buffers */
virtqueue_enable_cb(cfv->vq_tx);
netif_tx_stop_all_queues(netdev);
}
if (unlikely(!buf_info)) {
/* If the memory reserve does its job, this shouldn't happen */
netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
goto err;
}
ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
if (unlikely(ret < 0)) {
/* If flow control works, this shouldn't happen */
netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
ret);
goto err;
}
/* update netdev statistics */
cfv->ndev->stats.tx_packets++;
cfv->ndev->stats.tx_bytes += skb->len;
spin_unlock_irqrestore(&cfv->tx_lock, flags);
/* tell the remote processor it has a pending message to read */
virtqueue_kick(cfv->vq_tx);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
err:
spin_unlock_irqrestore(&cfv->tx_lock, flags);
cfv->ndev->stats.tx_dropped++;
free_buf_info(cfv, buf_info);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
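Example 2 stops the queue (flow-off) when the vring is nearly full and sets watermark_tx for the flow-on point. The matching wake-up side is not shown above; cfv_release_used_buf() handles it in the real driver. The sketch below is only a guess at its shape based on the fields used in the example, and the helper name is hypothetical:
/* Hypothetical sketch of the flow-on counterpart: wake the queue once
 * the number of free descriptors climbs back above the watermark set
 * on the flow-off path. */
static void cfv_maybe_wake_queue(struct cfv_info *cfv)
{
	if (cfv->watermark_tx &&
	    cfv->vq_tx->num_free >= cfv->watermark_tx) {
		cfv->watermark_tx = 0;
		netif_tx_wake_all_queues(cfv->ndev);
	}
}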
Example 3: eemcs_ipc_write
static ssize_t eemcs_ipc_write(struct file *fp, const char __user *buf, size_t in_sz, loff_t *ppos)
{
ssize_t ret = 0;
eemcs_ipc_node_t *curr_node = (eemcs_ipc_node_t *)fp->private_data;
KAL_UINT8 node_id = curr_node->ipc_node_id;/* node_id */
KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
KAL_UINT32 p_type, control_flag;
struct sk_buff *new_skb;
CCCI_BUFF_T *ccci_header;
ipc_ilm_t *ilm=NULL;
IPC_MSGSVC_TASKMAP_T *id_map;
size_t count = in_sz;
size_t skb_alloc_size;
KAL_UINT32 alloc_time = 0, curr_time = 0;
DEBUG_LOG_FUNCTION_ENTRY;
DBGLOG(IPCD, DBG, "[TX]device=%s iminor=%d len=%d", curr_node->dev_name, node_id, count);
p_type = ccci_get_port_type(port_id);
if(p_type != EX_T_USER)
{
DBGLOG(IPCD, ERR, "PORT%d refuse port(%d) access user port", port_id, p_type);
ret=-EINVAL;
goto _exit;
}
control_flag = ccci_get_port_cflag(port_id);
if (check_device_state() == EEMCS_EXCEPTION) {//modem exception
if ((control_flag & TX_PRVLG2) == 0) {
DBGLOG(IPCD, TRA, "[TX]PORT%d write fail when modem exception", port_id);
return -ETXTBSY;
}
} else if (check_device_state() != EEMCS_BOOTING_DONE) {//modem not ready
if ((control_flag & TX_PRVLG1) == 0) {
DBGLOG(IPCD, TRA, "[TX]PORT%d write fail when modem not ready", port_id);
return -ENODEV;
}
}
if((control_flag & EXPORT_CCCI_H) && (count < sizeof(CCCI_BUFF_T)))
{
DBGLOG(IPCD, ERR, "invalid write_len(%d) of PORT%d", count, port_id);
ret=-EINVAL;
goto _exit;
}
if(control_flag & EXPORT_CCCI_H){
if(count > (MAX_TX_BYTE+sizeof(CCCI_BUFF_T))){
DBGLOG(IPCD, WAR, "PORT%d write_len(%d)>MTU(%d)!", port_id, count, MAX_TX_BYTE);
count = MAX_TX_BYTE+sizeof(CCCI_BUFF_T);
}
skb_alloc_size = count - sizeof(CCCI_BUFF_T);
}else{
if(count > MAX_TX_BYTE){
DBGLOG(IPCD, WAR, "PORT%d write_len(%d)>MTU(%d)!", port_id, count, MAX_TX_BYTE);
count = MAX_TX_BYTE;
}
skb_alloc_size = count;
}
if (ccci_ch_write_space_alloc(eemcs_ipc_inst.ccci_ch.tx)==0){
DBGLOG(IPCD, WAR, "PORT%d write space alloc returned 0", port_id);
ret = -EAGAIN;
goto _exit;
}
new_skb = ccci_ipc_mem_alloc(skb_alloc_size + CCCI_IPC_HEADER_ROOM, GFP_ATOMIC);
if(NULL == new_skb)
{
DBGLOG(CHAR, INF, "[TX]PORT%d alloc skb failed, retrying with wait", port_id);
alloc_time = jiffies;
new_skb = ccci_ipc_mem_alloc(skb_alloc_size + CCCI_IPC_HEADER_ROOM, GFP_KERNEL);
if (NULL == new_skb) {
ret = -ENOMEM;
DBGLOG(IPCD, ERR, "[TX]PORT%d alloc skb with wait failed", port_id);
goto _exit;
}
curr_time = jiffies;
if ((curr_time - alloc_time) >= 1) {
DBGLOG(IPCD, ERR, "[TX]PORT%d alloc skb delay: time=%dms", port_id, \
10*(curr_time - alloc_time));
}
}
/* reserve SDIO_H header room */
#ifdef CCCI_SDIO_HEAD
skb_reserve(new_skb, sizeof(SDIO_H));
#endif
ccci_header = (CCCI_BUFF_T *)skb_put(new_skb, sizeof(CCCI_BUFF_T)) ;
if(copy_from_user(skb_put(new_skb, count), buf, count))
{
DBGLOG(IPCD, ERR, "[TX]PORT%d copy_from_user(len=%d, %p->%p) fail", \
port_id, count, buf, new_skb->data);
dev_kfree_skb(new_skb);
ret = -EFAULT;
goto _exit;
}
//......... part of the code is omitted here .........
Example 4: ni52_send_packet
static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int len, i;
#ifndef NO_NOPCOMMANDS
int next_nop;
#endif
struct priv *p = netdev_priv(dev);
if (skb->len > XMIT_BUFF_SIZE) {
printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
return 0;
}
netif_stop_queue(dev);
memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
len = skb->len;
if (len < ETH_ZLEN) {
len = ETH_ZLEN;
memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
len - skb->len);
}
#if (NUM_XMIT_BUFFS == 1)
# ifdef NO_NOPCOMMANDS
#ifdef DEBUG
if (readb(&p->scb->cus) & CU_ACTIVE) {
printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
printk(KERN_ERR "%s: stat: %04x %04x\n",
dev->name, readb(&p->scb->cus),
readw(&p->xmit_cmds[0]->cmd_status));
}
#endif
writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
for (i = 0; i < 16; i++) {
writew(0, &p->xmit_cmds[0]->cmd_status);
wait_for_scb_cmd(dev);
if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
writeb(CUC_RESUME, &p->scb->cmd_cuc);
else {
writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
}
ni_attn586();
dev->trans_start = jiffies;
if (!i)
dev_kfree_skb(skb);
wait_for_scb_cmd(dev);
/* test it, because CU sometimes doesn't start immediately */
if (readb(&p->scb->cus) & CU_ACTIVE)
break;
if (readw(&p->xmit_cmds[0]->cmd_status))
break;
if (i == 15)
printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
}
# else
next_nop = (p->nop_point + 1) & 0x1;
writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
writew(make16(p->nop_cmds[next_nop]),
&p->nop_cmds[next_nop]->cmd_link);
writew(0, &p->xmit_cmds[0]->cmd_status);
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
#else
writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
next_nop = p->xmit_count + 1;
if (next_nop == NUM_XMIT_BUFFS)
next_nop = 0;
writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
/* linkpointer of xmit-command already points to next nop cmd */
writew(make16(p->nop_cmds[next_nop]),
&p->nop_cmds[next_nop]->cmd_link);
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[p->xmit_count]),
&p->nop_cmds[p->xmit_count]->cmd_link);
dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
unsigned long flags;
spin_lock_irqsave(&p->spinlock, flags);
if (p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
spin_unlock_irqrestore(&p->spinlock, flags);
}
dev_kfree_skb(skb);
#endif
return 0;
}
Example 5: receive_skb
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
goto drop;
}
len -= sizeof(struct virtio_net_hdr);
BUG_ON(len > MAX_PACKET_LEN);
skb_trim(skb, len);
skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = hdr->csum_start;
skb->csum_offset = hdr->csum_offset;
if (skb->csum_start > skb->len - 2
|| skb->csum_offset > skb->len - 2) {
if (net_ratelimit())
printk(KERN_WARNING "%s: csum=%u/%u len=%u\n",
dev->name, skb->csum_start,
skb->csum_offset, skb->len);
goto frame_err;
}
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
switch (hdr->gso_type) {
case VIRTIO_NET_HDR_GSO_TCPV4:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_TCPV4_ECN:
skb_shinfo(skb)->gso_type = SKB_GSO_TCP_ECN;
break;
case VIRTIO_NET_HDR_GSO_UDP:
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
default:
if (net_ratelimit())
printk(KERN_WARNING "%s: bad gso type %u.\n",
dev->name, hdr->gso_type);
goto frame_err;
}
skb_shinfo(skb)->gso_size = hdr->gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
if (net_ratelimit())
printk(KERN_WARNING "%s: zero gso size.\n",
dev->name);
goto frame_err;
}
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
}
netif_receive_skb(skb);
return;
frame_err:
dev->stats.rx_frame_errors++;
drop:
dev_kfree_skb(skb);
}
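One API detail that example 5's drop label illustrates: on current kernels dev_kfree_skb() is simply #defined to consume_skb(), i.e. a "normal" free, whereas kfree_skb() records the free as a packet drop (visible to drop-monitoring tools and tracepoints). Newer RX error paths therefore tend to distinguish the two; a sketch, with rx_finish() as a hypothetical helper:
/* Sketch: drop vs. normal consumption on a current kernel. */
static void rx_finish(struct net_device *dev, struct sk_buff *skb, bool bad)
{
	if (bad) {
		dev->stats.rx_frame_errors++;
		kfree_skb(skb);		/* recorded as a drop */
	} else {
		consume_skb(skb);	/* normal end of life, not a drop */
	}
}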
Example 6: bch_l2l1
static void
bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
{
struct hfc4s8s_btype *bch = ifc->priv;
struct hfc4s8s_l1 *l1 = bch->l1p;
struct sk_buff *skb = (struct sk_buff *) arg;
long mode = (long) arg;
u_long flags;
switch (pr) {
case (PH_DATA | REQUEST):
if (!l1->enabled || (bch->mode == L1_MODE_NULL)) {
dev_kfree_skb(skb);
break;
}
spin_lock_irqsave(&l1->lock, flags);
skb_queue_tail(&bch->tx_queue, skb);
if (!bch->tx_skb && (bch->tx_cnt <= 0)) {
l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
((bch->bchan == 1) ? 1 : 4);
spin_unlock_irqrestore(&l1->lock, flags);
schedule_work(&l1->hw->tqueue);
} else
spin_unlock_irqrestore(&l1->lock, flags);
break;
case (PH_ACTIVATE | REQUEST):
case (PH_DEACTIVATE | REQUEST):
if (!l1->enabled)
break;
if (pr == (PH_DEACTIVATE | REQUEST))
mode = L1_MODE_NULL;
switch (mode) {
		case L1_MODE_HDLC:
			spin_lock_irqsave(&l1->lock, flags);
			l1->hw->mr.timer_usg_cnt++;
			l1->hw->mr.fifo_slow_timer_service[l1->st_num] |=
				((bch->bchan == 1) ? 0x2 : 0x8);
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xc);	/* HDLC mode, flag fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 1);	/* enable TX interrupts for hdlc */
			Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xc);	/* HDLC mode, flag fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 1);	/* enable RX interrupts for hdlc */
			Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */
			Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
			l1->hw->mr.r_ctrl0 |= (bch->bchan & 3);
			Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0);
			bch->mode = L1_MODE_HDLC;
			spin_unlock_irqrestore(&l1->lock, flags);
			bch->b_if.ifc.l1l2(&bch->b_if.ifc,
					   PH_ACTIVATE | INDICATION, NULL);
			break;
		case L1_MODE_TRANS:
			spin_lock_irqsave(&l1->lock, flags);
			l1->hw->mr.fifo_rx_trans_enables[l1->st_num] |=
				((bch->bchan == 1) ? 0x2 : 0x8);
			l1->hw->mr.timer_usg_cnt++;
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xf);	/* transparent mode, 1 fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable TX interrupts */
//......... part of the code is omitted here .........
Example 7: rx_d_frame
static void
rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
{
int z1, z2;
u_char f1, f2, df;
struct sk_buff *skb;
u_char *cp;
if (!l1p->enabled)
return;
do {
/* E/D RX fifo */
Write_hfc8(l1p->hw, R_FIFO,
(l1p->st_num * 8 + ((ech) ? 7 : 5)));
wait_busy(l1p->hw);
f1 = Read_hfc8_stable(l1p->hw, A_F1);
f2 = Read_hfc8(l1p->hw, A_F2);
df = f1 - f2;
if ((f1 - f2) < 0)
df = f1 - f2 + MAX_F_CNT + 1;
if (!df) {
return; /* no complete frame in fifo */
}
z1 = Read_hfc16_stable(l1p->hw, A_Z1);
z2 = Read_hfc16(l1p->hw, A_Z2);
z1 = z1 - z2 + 1;
if (z1 < 0)
z1 += 384;
if (!(skb = dev_alloc_skb(MAX_D_FRAME_SIZE))) {
printk(KERN_INFO
"HFC-4S/8S: Could not allocate D/E "
"channel receive buffer");
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
wait_busy(l1p->hw);
return;
}
if (((z1 < 4) || (z1 > MAX_D_FRAME_SIZE))) {
if (skb)
dev_kfree_skb(skb);
/* remove erroneous D frame */
if (df == 1) {
/* reset fifo */
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
wait_busy(l1p->hw);
return;
} else {
/* read erroneous D frame */
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif
while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
Read_hfc32(l1p->hw, A_FIFO_DATA0);
#else
fRead_hfc32(l1p->hw);
#endif
z1 -= 4;
}
while (z1--)
#ifdef HISAX_HFC4S8S_PCIMEM
Read_hfc8(l1p->hw, A_FIFO_DATA0);
#else
fRead_hfc8(l1p->hw);
#endif
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
wait_busy(l1p->hw);
return;
}
}
cp = skb->data;
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif
while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
*((unsigned long *) cp) =
Read_hfc32(l1p->hw, A_FIFO_DATA0);
#else
*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
#endif
cp += 4;
z1 -= 4;
}
while (z1--)
//......... part of the code is omitted here .........
Example 8: Dta1xxNwStartXmit
//.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.- Dta1xxNwStartXmit -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
//
// The stack will execute Dta1xxNwStartXmit only once.
// (NETIF_F_LLTX flag is not set)
//
int Dta1xxNwStartXmit(struct sk_buff* pSkb, struct net_device* pDevice)
{
Dta1xxNw_Private* lp = (Dta1xxNw_Private*)netdev_priv(pDevice);
Dta1xxDmaTxHeader* pTxHeader;
Int PktLength;
Int TotalLength;
UInt FreeSpaceUntilEnd;
UInt FreeSpaceFromBegin;
UInt ReadOffset;
UInt WriteOffset;
Dta1xxNwBuf* pNwTx = &lp->m_NwTxBuf;
#if LOG_LEVEL > 0
DTA1XXNW_LOG(KERN_DEBUG, "Dta1xxNwStartXmit is called");
#endif
// Check if it's our own dummy generated packet
if ((pSkb->len >= (sizeof(EthernetIIHeader) + sizeof(IpHeaderV4)-5 + sizeof(UdpHeader) + 10)) &&
(pSkb->len <= ETH_ZLEN)){
EthernetIIHeader* pEthernetIIHeader;
IpHeaderV4* pIpHeader;
UdpHeader* pUdpHeader;
UInt8* pData;
pEthernetIIHeader = (EthernetIIHeader*)pSkb->data;
pIpHeader = (IpHeaderV4*)(pSkb->data + sizeof(EthernetIIHeader));
pUdpHeader = (UdpHeader*)(pSkb->data + sizeof(EthernetIIHeader)+ sizeof(IpHeaderV4)-5);
pData = pSkb->data + sizeof(EthernetIIHeader) + sizeof(IpHeaderV4)-5 + sizeof(UdpHeader);
if ((pUdpHeader->m_DestinationPort == 0x0400) &&
(strcmp(pData, "DektecArp\0") == 0)) {
// It's our own dummy packet. Skip it.
dev_kfree_skb(pSkb);
return STATUS_SUCCESS;
}
}
if (lp->m_Connected == 0) {
lp->m_NetStats.tx_errors++;
return STATUS_UNSUCCESSFUL;
}
PktLength = (pSkb->len < ETH_ZLEN ? ETH_ZLEN : pSkb->len);
// Make sure there is enough free space for a complete packet including header and alignment
TotalLength = PktLength + sizeof(Dta1xxDmaTxHeader);
if ((TotalLength%4)!=0)
TotalLength += (4-(TotalLength%4));
ReadOffset = pNwTx->m_ReadOffset;
WriteOffset = pNwTx->m_WriteOffset;
if (WriteOffset < ReadOffset) {
FreeSpaceFromBegin = 0;
FreeSpaceUntilEnd = ReadOffset - WriteOffset - 1;
} else {
FreeSpaceUntilEnd = pNwTx->m_BufSize - WriteOffset - 1;
if (ReadOffset == 0)
FreeSpaceFromBegin = 0;
else
FreeSpaceFromBegin = ReadOffset - 1;
}
if (FreeSpaceUntilEnd >= TotalLength) {
// Found free space at end of buffer
pTxHeader = (Dta1xxDmaTxHeader*)(pNwTx->m_pBuffer + WriteOffset);
} else if (FreeSpaceFromBegin >= TotalLength) {
// Found free space at begin of buffer
// Mark first free byte at end of buffer as not used '*',
// so the TxThread knows to start next packet at begin of buffer.
pNwTx->m_pBuffer[WriteOffset] = '*';
WriteOffset = 0;
pTxHeader = (Dta1xxDmaTxHeader*)(pNwTx->m_pBuffer + WriteOffset);
} else {
// Not enough free space, skip packet
lp->m_NetStats.tx_dropped++;
return STATUS_UNSUCCESSFUL;
}
memset(pTxHeader, 0, sizeof(Dta1xxDmaTxHeader));
pTxHeader->Tag = 0x445441A0;
pTxHeader->Version = 1;
pTxHeader->Length = sizeof(Dta1xxDmaTxHeader);
pTxHeader->TransmitControl.PacketLength = PktLength;
WriteOffset+= sizeof(Dta1xxDmaTxHeader);
if (pSkb->len < ETH_ZLEN) {
// Reset stuffing bytes
memset(pNwTx->m_pBuffer+WriteOffset, 0, ETH_ZLEN);
}
memcpy(pNwTx->m_pBuffer+WriteOffset, pSkb->data, pSkb->len);
// Make sure the next packet is aligned on multiples of 4
if ((PktLength%4)!=0)
PktLength += (4-(PktLength%4));
//......... part of the code is omitted here .........
Example 9: dch_l2l1
static void
dch_l2l1(struct hisax_d_if *iface, int pr, void *arg)
{
struct hfc4s8s_l1 *l1 = iface->ifc.priv;
struct sk_buff *skb = (struct sk_buff *) arg;
u_long flags;
switch (pr) {
case (PH_DATA | REQUEST):
if (!l1->enabled) {
dev_kfree_skb(skb);
break;
}
spin_lock_irqsave(&l1->lock, flags);
skb_queue_tail(&l1->d_tx_queue, skb);
if ((skb_queue_len(&l1->d_tx_queue) == 1) &&
(l1->tx_cnt <= 0)) {
l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
0x10;
spin_unlock_irqrestore(&l1->lock, flags);
schedule_work(&l1->hw->tqueue);
} else
spin_unlock_irqrestore(&l1->lock, flags);
break;
case (PH_ACTIVATE | REQUEST):
if (!l1->enabled)
break;
if (!l1->nt_mode) {
			if (l1->l1_state < 6) {
				spin_lock_irqsave(&l1->lock, flags);
				Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
				Write_hfc8(l1->hw, A_ST_WR_STA, 0x60);
				mod_timer(&l1->l1_timer, jiffies + L1_TIMER_T3);
				spin_unlock_irqrestore(&l1->lock, flags);
			} else if (l1->l1_state == 7)
				l1->d_if.ifc.l1l2(&l1->d_if.ifc,
						  PH_ACTIVATE | INDICATION, NULL);
} else {
			if (l1->l1_state != 3) {
				spin_lock_irqsave(&l1->lock, flags);
				Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
				Write_hfc8(l1->hw, A_ST_WR_STA, 0x60);
				spin_unlock_irqrestore(&l1->lock, flags);
			} else if (l1->l1_state == 3)
				l1->d_if.ifc.l1l2(&l1->d_if.ifc,
						  PH_ACTIVATE | INDICATION, NULL);
}
break;
default:
printk(KERN_INFO
"HFC-4S/8S: Unknown D-chan cmd 0x%x received, ignored\n",
pr);
break;
}
if (!l1->enabled)
l1->d_if.ifc.l1l2(&l1->d_if.ifc,
PH_DEACTIVATE | INDICATION, NULL);
} /* dch_l2l1 */
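Example 9 frees a single skb that arrives while the layer is disabled, but frames already sitting on d_tx_queue also have to be released when the channel goes away. The kernel provides skb_queue_purge() for this, which dequeues and frees every remaining skb. A sketch using the queue from the example (dch_close() itself is a hypothetical teardown hook):
/* Hypothetical teardown hook: flush everything still queued for TX. */
static void dch_close(struct hfc4s8s_l1 *l1)
{
	l1->enabled = 0;
	skb_queue_purge(&l1->d_tx_queue);	/* frees each queued skb */
}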
Example 10: ibmlana_tx
static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
{
ibmlana_priv *priv = netdev_priv(dev);
int retval = 0, tmplen, addr;
unsigned long flags;
tda_t tda;
int baddr;
/* find out if there are free slots for a frame to transmit. If not,
the upper layer is in deep desperation and we simply ignore the frame. */
if (priv->txusedcnt >= TXBUFCNT) {
retval = -EIO;
priv->stat.tx_dropped++;
goto tx_done;
}
/* copy the frame data into the next free transmit buffer - fillup missing */
tmplen = skb->len;
if (tmplen < 60)
tmplen = 60;
baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
memcpy_toio(priv->base + baddr, skb->data, skb->len);
/* copy filler into RAM - in case we're filling up...
we're filling a bit more than necessary, but that doesn't harm
since the buffer is far larger...
Sorry Linus for the filler string but I couldn't resist ;-) */
if (tmplen > skb->len) {
char *fill = "NetBSD is a nice OS too! ";
unsigned int destoffs = skb->len, l = strlen(fill);
while (destoffs < tmplen) {
memcpy_toio(priv->base + baddr + destoffs, fill, l);
destoffs += l;
}
}
/* set up the new frame descriptor */
addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
tda.length = tda.fraglength = tmplen;
memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
/* if there were no active descriptors, trigger the SONIC */
spin_lock_irqsave(&priv->lock, flags);
priv->txusedcnt++;
priv->txused[priv->nexttxdescr] = 1;
/* are all transmission slots used up ? */
if (priv->txusedcnt >= TXBUFCNT)
netif_stop_queue(dev);
if (priv->txusedcnt == 1)
StartTx(dev, priv->nexttxdescr);
priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;
spin_unlock_irqrestore(&priv->lock, flags);
tx_done:
dev_kfree_skb(skb);
return retval;
}
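Examples 4 and 10 free the skb immediately because they copy the frame into adapter memory, while examples 1 and 12 must hold it until the TX-done interrupt because the hardware DMAs straight out of skb->data. A comment-only summary of the two ownership models:
/* Two skb ownership models across these examples:
 *
 * copy-then-free (examples 4, 10): the frame is copied into device
 * memory (memcpy_toio), so nothing references the skb afterwards and
 * dev_kfree_skb() runs right away in the xmit handler.
 *
 * map-then-free-on-completion (examples 1, 12): the hardware reads
 * skb->data directly, so the skb must stay alive until the TX-done
 * interrupt, which is where dev_kfree_skb() is finally called. */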
Example 11: rt2x00lib_rxdone
void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct rxdone_entry_desc rxdesc;
struct sk_buff *skb;
struct ieee80211_rx_status *rx_status;
unsigned int header_length;
int rate_idx;
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
goto submit_entry;
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
goto submit_entry;
/*
* Allocate a new sk_buffer. If no new buffer available, drop the
* received frame and reuse the existing buffer.
*/
skb = rt2x00queue_alloc_rxskb(entry, gfp);
if (!skb)
goto submit_entry;
/*
* Unmap the skb.
*/
rt2x00queue_unmap_skb(entry);
/*
* Extract the RXD details.
*/
memset(&rxdesc, 0, sizeof(rxdesc));
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
/*
* Check for valid size in case we get corrupted descriptor from
* hardware.
*/
if (unlikely(rxdesc.size == 0 ||
rxdesc.size > entry->queue->data_size)) {
ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
rxdesc.size, entry->queue->data_size);
dev_kfree_skb(entry->skb);
goto renew_skb;
}
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
*/
header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
/*
* Hardware might have stripped the IV/EIV/ICV data,
* in that case it is possible that the data was
* provided separately (through hardware descriptor)
* in which case we should reinsert the data into the frame.
*/
if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
(rxdesc.flags & RX_FLAG_IV_STRIPPED))
rt2x00crypto_rx_insert_iv(entry->skb, header_length,
&rxdesc);
else if (header_length &&
(rxdesc.size > header_length) &&
(rxdesc.dev_flags & RXDONE_L2PAD))
rt2x00queue_remove_l2pad(entry->skb, header_length);
/* Trim buffer to correct size */
skb_trim(entry->skb, rxdesc.size);
/*
* Translate the signal to the correct bitrate index.
*/
rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
if (rxdesc.rate_mode == RATE_MODE_HT_MIX ||
rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD)
rxdesc.flags |= RX_FLAG_HT;
/*
* Check if this is a beacon, and more frames have been
* buffered while we were in powersaving mode.
*/
rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);
/*
* Update extra components
*/
rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
/*
* Initialize RX status information, and send frame
* to mac80211.
*/
rx_status = IEEE80211_SKB_RXCB(entry->skb);
/* Ensure that all fields of rx_status are initialized
* properly. The skb->cb array was used for driver
//......... part of the code is omitted here .........
Example 12: sonic_send_packet
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
int entry = lp->next_tx;
	if (sonic_debug > 2)
		;	/* debug printk stripped from this source tree */
length = skb->len;
if (length < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
length = ETH_ZLEN;
}
/*
* Map the packet data into the logical DMA address space
*/
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		;	/* error printk stripped from this source tree */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* skb was freed, so it must not be requeued */
	}
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
/*
* Must set tx_skb[entry] only after clearing status, and
* before clearing EOL and before stopping queue
*/
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
lp->tx_skb[entry] = skb;
wmb();
sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
lp->eol_tx = entry;
lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
if (lp->tx_skb[lp->next_tx] != NULL) {
/* The ring is full, the ISR has yet to process the next TD. */
		if (sonic_debug > 3)
			;	/* debug printk stripped from this source tree */
netif_stop_queue(dev);
/* after this packet, wait for ISR to free up some TDAs */
} else netif_start_queue(dev);
	if (sonic_debug > 2)
		;	/* debug printk stripped from this source tree */
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
return NETDEV_TX_OK;
}
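A modernization note on example 12: a failed dma_map_single() should be detected with dma_mapping_error() rather than by testing the handle against zero, since a zero bus address can be valid on some platforms; and once the skb has been freed, the handler must return NETDEV_TX_OK so the stack does not requeue the freed buffer. A sketch of the mapping block with current conventions:
/* Sketch: mapping-failure path with current DMA API conventions. */
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
if (dma_mapping_error(lp->device, laddr)) {
	pr_err("%s: failed to map TX DMA buffer\n", dev->name);
	dev_kfree_skb_any(skb);	/* safe in any calling context */
	return NETDEV_TX_OK;	/* skb consumed; never TX_BUSY after a free */
}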
Example 13: ri_tasklet
static void ri_tasklet(unsigned long dev)
{
struct net_device *_dev = (struct net_device *)dev;
struct ifb_private *dp = netdev_priv(_dev);
struct net_device_stats *stats = &_dev->stats;
struct netdev_queue *txq;
struct sk_buff *skb;
txq = netdev_get_tx_queue(_dev, 0);
dp->st_task_enter++;
if ((skb = skb_peek(&dp->tq)) == NULL) {
dp->st_txq_refl_try++;
if (__netif_tx_trylock(txq)) {
dp->st_rxq_enter++;
while ((skb = skb_dequeue(&dp->rq)) != NULL) {
skb_queue_tail(&dp->tq, skb);
dp->st_rx2tx_tran++;
}
__netif_tx_unlock(txq);
} else {
/* reschedule */
dp->st_rxq_notenter++;
goto resched;
}
}
while ((skb = skb_dequeue(&dp->tq)) != NULL) {
u32 from = G_TC_FROM(skb->tc_verd);
skb->tc_verd = 0;
skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
stats->tx_packets++;
stats->tx_bytes +=skb->len;
skb->dev = dev_get_by_index(&init_net, skb->iif);
if (!skb->dev) {
dev_kfree_skb(skb);
stats->tx_dropped++;
break;
}
dev_put(skb->dev);
skb->iif = _dev->ifindex;
if (from & AT_EGRESS) {
dp->st_rx_frm_egr++;
dev_queue_xmit(skb);
} else if (from & AT_INGRESS) {
dp->st_rx_frm_ing++;
skb_pull(skb, skb->dev->hard_header_len);
netif_rx(skb);
} else
BUG();
}
if (__netif_tx_trylock(txq)) {
dp->st_rxq_check++;
if ((skb = skb_peek(&dp->rq)) == NULL) {
dp->tasklet_pending = 0;
if (netif_queue_stopped(_dev))
netif_wake_queue(_dev);
} else {
dp->st_rxq_rsch++;
__netif_tx_unlock(txq);
goto resched;
}
__netif_tx_unlock(txq);
} else {
resched:
dp->tasklet_pending = 1;
tasklet_schedule(&dp->ifb_tasklet);
}
}
Example 14: tx_d_frame
static void
tx_d_frame(struct hfc4s8s_l1 *l1p)
{
struct sk_buff *skb;
u_char f1, f2;
u_char *cp;
long cnt;
if (l1p->l1_state != 7)
return;
/* TX fifo */
Write_hfc8(l1p->hw, R_FIFO, (l1p->st_num * 8 + 4));
wait_busy(l1p->hw);
f1 = Read_hfc8(l1p->hw, A_F1);
f2 = Read_hfc8_stable(l1p->hw, A_F2);
if ((f1 ^ f2) & MAX_F_CNT)
return; /* fifo is still filled */
if (l1p->tx_cnt > 0) {
cnt = l1p->tx_cnt;
l1p->tx_cnt = 0;
l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA | CONFIRM,
(void *) cnt);
}
if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
cp = skb->data;
cnt = skb->len;
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif
while (cnt >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
*(unsigned long *) cp);
#else
SetRegAddr(l1p->hw, A_FIFO_DATA0);
fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
#endif
cp += 4;
cnt -= 4;
}
#ifdef HISAX_HFC4S8S_PCIMEM
while (cnt--)
fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
#else
while (cnt--)
fWrite_hfc8(l1p->hw, *cp++);
#endif
l1p->tx_cnt = skb->truesize;
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
wait_busy(l1p->hw);
dev_kfree_skb(skb);
}
} /* tx_d_frame */
Example 15: xennet_start_xmit
//......... part of the code is omitted here .........
if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
net_alert_ratelimited(
"xennet: skb->len = %u, too big for wire format\n",
skb->len);
goto drop;
}
frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
frags);
dump_stack();
goto drop;
}
spin_lock_irqsave(&np->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irqrestore(&np->tx_lock, flags);
goto drop;
}
i = np->tx.req_prod_pvt;
id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
np->tx_skbs[id].skb = skb;
tx = RING_GET_REQUEST(&np->tx, i);
tx->id = id;
ref = gnttab_claim_grant_reference(&np->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
extra = NULL;
tx->flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
tx->flags |= XEN_NETTXF_data_validated;
if (skb_shinfo(skb)->gso_size) {
struct xen_netif_extra_info *gso;
gso = (struct xen_netif_extra_info *)
RING_GET_REQUEST(&np->tx, ++i);
if (extra)
extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
else
tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
gso->flags = 0;
extra = gso;
}
np->tx.req_prod_pvt = i + 1;
xennet_make_frags(skb, dev, tx);
tx->size = skb->len;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
if (notify)
notify_remote_via_irq(np->netdev->irq);
u64_stats_update_begin(&stats->syncp);
stats->tx_bytes += skb->len;
stats->tx_packets++;
u64_stats_update_end(&stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(dev);
if (!netfront_tx_slot_available(np))
netif_stop_queue(dev);
spin_unlock_irqrestore(&np->tx_lock, flags);
return NETDEV_TX_OK;
drop:
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
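Taken together, the fifteen examples call dev_kfree_skb() from process context, softirq context and, in the oldest drivers, hard interrupt context. As a closing summary, the context rules on current kernels:
/*
 * dev_kfree_skb(skb)      - alias of consume_skb(); process or softirq
 *                           context only, never hardirq or IRQs-off.
 * dev_kfree_skb_irq(skb)  - queues the skb for deferred freeing; safe
 *                           in hardirq context or with IRQs disabled.
 * dev_kfree_skb_any(skb)  - picks one of the above at run time; use it
 *                           when the calling context is not known.
 * kfree_skb(skb)          - free that is accounted as a packet drop;
 *                           preferred on error paths.
 */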