本文整理汇总了C++中skb_copy_to_linear_data函数的典型用法代码示例。如果您正苦于以下问题:C++ skb_copy_to_linear_data函数的具体用法?C++ skb_copy_to_linear_data怎么用?C++ skb_copy_to_linear_data使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了skb_copy_to_linear_data函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: tipc_msg_build
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;		/* total message size: header + data */
	int pktno = 1;			/* fragment sequence number */
	int pktsz;
	int pktrem = pktmax;		/* bytes still free in current packet */
	int drem = dsz;			/* user data bytes still to be copied */
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		/* memcpy_fromiovecend() returns 0 on success */
		if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
						 dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment: fragment header + original message header */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;
		if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;
		if (!drem)
			break;
		/* Prepare new fragment: the last one may be shorter than pktmax */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;
	} while (1);
	/* Mark the trailing fragment so the receiver can reassemble */
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	/* On failure, release every buffer queued so far */
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
//.........这里部分代码省略.........
示例2: ip6mr_cache_report
/*
 * Build a mrt6msg control packet from @pkt and queue it to the userspace
 * multicast routing daemon via the mroute6 socket.
 *
 * @pkt:    received packet that triggered the report
 * @mifi:   multicast interface index the packet arrived on
 * @assert: report type (e.g. MRT6MSG_WHOLEPKT for PIM register paths)
 *
 * Returns 0 on success, or a negative errno (-ENOBUFS, -EINVAL, or the
 * sock_queue_rcv_skb() failure code).
 */
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	/* WHOLEPKT reports carry the entire original packet, so size the
	 * new skb from the original plus room for the msg header */
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
					   +sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));
		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 * Copy the IP header
		 */
		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
		/*
		 * Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
		skb->dst = dst_clone(pkt->dst);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* leave skb->data pointing at the mrt6msg payload */
		skb_pull(skb, sizeof(struct ipv6hdr));
	}

	/* nobody listening? */
	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 * Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}
	return ret;
}
示例3: sun3_82586_rcv_int
/*
 * Receive interrupt handler for the Sun-3 Intel 82586 Ethernet controller.
 *
 * Walks the receive frame descriptor (RFD) list while the chip marks
 * frames complete, copies each good single-buffer frame into a fresh skb
 * and passes it up the stack, discards multi-buffer (oversized) frames,
 * then recycles the RFD and advances the ring.
 *
 * NOTE(review): the function body is truncated in this view (see the
 * trailing "#if 0" block); the tail is not shown here.
 */
static void sun3_82586_rcv_int(struct net_device *dev)
{
	int status,cnt=0;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = netdev_priv(dev);

	if(debuglevel > 0)
		printk("R");

	/* process every RFD the 82586 has marked complete */
	for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
	{
		rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

		if(status & RFD_OK) /* frame received without error? */
		{
			if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
			{
				totlen &= RBD_MASK; /* length of this frame */
				rbd->status = 0;
				skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
				if(skb != NULL)
				{
					skb_reserve(skb,2);
					skb_put(skb,totlen);
					/* copy from the board's shared memory window */
					skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
					skb->protocol=eth_type_trans(skb,dev);
					netif_rx(skb);
					p->stats.rx_packets++;
				}
				else
					p->stats.rx_dropped++;
			}
			else
			{
				int rstat;

				/* free all RBD's until RBD_LAST is set */
				totlen = 0;
				while(!((rstat=swab16(rbd->status)) & RBD_LAST))
				{
					totlen += rstat & RBD_MASK;
					if(!rstat)
					{
						printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
						break;
					}
					rbd->status = 0;
					rbd = (struct rbd_struct *) make32(rbd->next);
				}
				totlen += rstat & RBD_MASK;
				rbd->status = 0;
				printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
				p->stats.rx_dropped++;
			}
		}
		else /* frame !(ok), only with 'save-bad-frames' */
		{
			printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
			p->stats.rx_errors++;
		}

		/* recycle this RFD and advance top/last pointers of the ring */
		p->rfd_top->stat_high = 0;
		p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
		p->rfd_top->rbd_offset = 0xffff;
		p->rfd_last->last = 0;		/* delete RFD_SUSP */
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
		p->scb->rfa_offset = make16(p->rfd_top);

		if(debuglevel > 0)
			printk("%d",cnt++);
	}

	if(automatic_resume)
	{
		/* restart the receive unit after servicing the ring */
		WAIT_4_SCB_CMD();
		p->scb->cmd_ruc = RUC_RESUME;
		sun3_attn586();
		WAIT_4_SCB_CMD_RUC();
	}

#ifdef WAIT_4_BUSY
	{
		int i;

		for(i=0;i<1024;i++)
		{
			if(p->rfd_top->status)
				break;
			DELAY_16();
			if(i == 1023)
				printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
		}
	}
#endif

#if 0
	if(!at_least_one)
	{
		int i;
		volatile struct rfd_struct *rfds=p->rfd_top;
//.........这里部分代码省略.........
示例4: x25_output
/*
 * This is where all X.25 information frames pass.
 *
 * Frames longer than the negotiated packet size are fragmented into
 * packet-sized pieces, each carrying a duplicate of the original header
 * with the M (more-data) bit set on all but the last fragment.
 *
 * Returns the amount of user data bytes sent on success
 * or a negative error code on failure.
 */
int x25_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char header[X25_EXT_MIN_LEN];
	int err, frontlen, len;
	int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
	struct x25_sock *x25 = x25_sk(sk);
	/* header length depends on whether extended (modulo-128) sequence
	 * numbering was negotiated with the neighbour */
	int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
					X25_STD_MIN_LEN;
	int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);

	if (skb->len - header_len > max_len) {
		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, header_len);
		skb_pull(skb, header_len);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			/* drop the socket lock while sock_alloc_send_skb()
			 * may sleep waiting for buffer space */
			release_sock(sk);
			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
						   noblock, &err);
			lock_sock(sk);

			if (!skbn) {
				if (err == -EWOULDBLOCK && noblock){
					kfree_skb(skb);
					return sent;
				}
				SOCK_DEBUG(sk, "x25_output: fragment alloc"
					       " failed, err=%d, %d bytes "
					       "sent\n", err, sent);
				return err;
			}

			skb_reserve(skbn, frontlen);

			len = max_len > skb->len ? skb->len : max_len;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Header */
			skb_push(skbn, header_len);
			skb_copy_to_linear_data(skbn, header, header_len);

			/* set the M bit on every fragment but the last */
			if (skb->len > 0) {
				if (x25->neighbour->extended)
					skbn->data[3] |= X25_EXT_M_BIT;
				else
					skbn->data[2] |= X25_STD_M_BIT;
			}

			skb_queue_tail(&sk->sk_write_queue, skbn);
			sent += len;
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);
		sent = skb->len - header_len;
	}
	return sent;
}
示例5: receive_packet
/*
 * Drain completed descriptors from the receive ring, handing good frames
 * to the stack (copying small frames below copy_thresh, handing off the
 * DMA buffer for large ones), then refill ring slots whose skb was
 * consumed.
 *
 * NOTE(review): the function body is truncated in this view; the tail
 * (return statement, unlock) is not shown here.
 */
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;	/* bound the work done per invocation */

	/* process descriptors completed by the hardware */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* frame complete: extract status and length */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* bump the relevant error counters on a bad frame */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* large frame: unmap and hand over the DMA buffer */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				/* small frame: copy into a fresh skb, keep
				 * the DMA buffer mapped for reuse */
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
							 np->rx_skbuff[entry]->data,
							 pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			/* NOTE(review): if the small-frame allocation above
			 * fails, 'skb' is used uninitialized here — verify
			 * against the full driver source. */
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* checksum offload (disabled) */
			if (np->pdev->pci_rev_id >= 0x0c &&
			    !(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* refill any ring slots whose skb was handed to the stack */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;

		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
示例6: scc_enet_rx
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 *
 * Walks the receive buffer-descriptor (BD) ring, copying each good frame
 * (minus its 4-byte FCS) into a new skb for netif_rx(), then marks the BD
 * empty again for the CPM.  Always returns 0.
 */
static int
scc_enet_rx(struct net_device *dev)
{
	struct scc_enet_private *cep;
	volatile cbd_t *bdp;
	struct sk_buff *skb;
	ushort pkt_len;

	cep = dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;

	for (;;) {
		/* BD still owned by the CPM: nothing more to receive */
		if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
			break;

#ifndef final_version
		/* Since we have allocated space to hold a complete frame, both
		 * the first and last indicators should be set.
		 */
		if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
		    (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
			printk("CPM ENET: rcv is not first+last\n");
#endif

		/* Frame too long or too short.
		 */
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
			cep->stats.rx_length_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
			cep->stats.rx_frame_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
			cep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
			cep->stats.rx_crc_errors++;

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_CL) {
			cep->stats.rx_frame_errors++;
		}
		else {
			/* Process the incoming frame.
			 */
			cep->stats.rx_packets++;
			pkt_len = bdp->cbd_datlen;
			cep->stats.rx_bytes += pkt_len;

			/* This does 16 byte alignment, much more than we need.
			 * The packet length includes FCS, but we don't want to
			 * include that when passing upstream as it messes up
			 * bridging applications.
			 */
			skb = dev_alloc_skb(pkt_len-4);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				cep->stats.rx_dropped++;
			}
			else {
				skb_put(skb,pkt_len-4);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)__va(bdp->cbd_bufaddr),
					pkt_len-4);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
		}

		/* Clear the status flags for this buffer.
		 */
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		 */
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = cep->rx_bd_base;
		else
			bdp++;
	}
	cep->cur_rx = (cbd_t *)bdp;
	return 0;
}
示例7: udp_uncompress
/*
 * Expand a 6LoWPAN NHC-compressed UDP header in place: parse the
 * compressed ports and checksum from the front of @skb, then push a
 * full struct udphdr back onto the packet.
 *
 * @skb:    packet positioned at the NHC UDP octet
 * @needed: headroom to guarantee via skb_cow() before the push
 *
 * Returns 0 on success, -EINVAL on a short/unsupported header, or the
 * skb_cow() error.
 */
static int udp_uncompress(struct sk_buff *skb, size_t needed)
{
	struct udphdr uh;
	u8 nhc = 0, ports = 0;
	bool fail;
	int err;

	/* the NHC octet selects the port compression mode and the
	 * checksum-elision flag */
	fail = lowpan_fetch_skb(skb, &nhc, sizeof(nhc));
	pr_debug("UDP header uncompression\n");
	switch (nhc & LOWPAN_NHC_UDP_CS_P_11) {
	case LOWPAN_NHC_UDP_CS_P_00:
		/* both ports carried inline, uncompressed */
		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
		break;
	case LOWPAN_NHC_UDP_CS_P_01:
		/* full source port, 8-bit compressed destination port */
		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
		fail |= lowpan_fetch_skb(skb, &ports, sizeof(ports));
		uh.dest = htons(ports + LOWPAN_NHC_UDP_8BIT_PORT);
		break;
	case LOWPAN_NHC_UDP_CS_P_10:
		/* 8-bit compressed source port, full destination port */
		fail |= lowpan_fetch_skb(skb, &ports, sizeof(ports));
		uh.source = htons(ports + LOWPAN_NHC_UDP_8BIT_PORT);
		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
		break;
	case LOWPAN_NHC_UDP_CS_P_11:
		/* both ports packed as 4-bit offsets in one octet */
		fail |= lowpan_fetch_skb(skb, &ports, sizeof(ports));
		uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (ports >> 4));
		uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (ports & 0x0f));
		break;
	default:
		/* unreachable: all four two-bit values are handled */
		BUG();
	}

	pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
		 ntohs(uh.source), ntohs(uh.dest));

	/* checksum */
	if (nhc & LOWPAN_NHC_UDP_CS_C) {
		pr_debug_ratelimited("checksum elided currently not supported\n");
		fail = true;
	} else {
		fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
	}

	if (fail)
		return -EINVAL;

	/* UDP length needs to be infered from the lower layers
	 * here, we obtain the hint from the remaining size of the
	 * frame
	 */
	uh.len = htons(skb->len + sizeof(struct udphdr));
	pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));

	/* replace the compressed UDP head by the uncompressed UDP
	 * header
	 */
	err = skb_cow(skb, needed);
	if (unlikely(err))
		return err;

	skb_push(skb, sizeof(struct udphdr));
	skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
	return 0;
}
示例8: fifo_dma_copy_to_linear_skb
/* Copy @len bytes from the FIFO's next DMA descriptor into the linear
 * data area of @skb. */
static void fifo_dma_copy_to_linear_skb(struct ccat_eth_fifo *const fifo,
					struct sk_buff *skb, const size_t len)
{
	const void *const src = fifo->dma.next->data;

	skb_copy_to_linear_data(skb, src, len);
}
示例9: pxa_irda_fir_irq_eif
/*
 * FIR receive end-of-frame / error handling: drain the remaining bytes
 * (with their per-byte status) from the ICP FIFO after the DMA transfer,
 * then, if a complete frame was received, copy it into an skb and pass
 * it to the stack.
 */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* bytes already transferred into the buffer by the RX DMA channel */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		stat = ICSR1;
		rmb();	/* read status before fetching the matching data byte */
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			/* good byte: append it after the DMA'd portion */
			si->dma_rx_buff[len++] = data;
		}
		/* stop once the end-of-frame marker is seen */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* reserve one byte of headroom -- presumably for alignment;
		 * TODO confirm against the full driver */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
示例10: sendup_buffer
/*
 * Forward a LocalTalk frame just received from the LTPC card to the
 * network stack.
 *
 * On entry the command block is in ltdmacbuf and the frame data is in
 * ltdmabuf; called from idle context, non-reentrant.
 *
 * Returns 0 on success, -1 on a malformed command or allocation failure.
 */
static int sendup_buffer (struct net_device *dev)
{
	struct lt_rcvlap *cmd = (struct lt_rcvlap *) ltdmacbuf;
	struct sk_buff *skb;
	int dst_node, src_node, lap_type, data_len, alloc_len;

	if (cmd->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n", cmd->command);
		return -1;
	}

	dst_node = cmd->dnode;
	src_node = cmd->snode;
	lap_type = cmd->laptype;
	data_len = cmd->length;

	alloc_len = data_len;
	if (lap_type == 1)
		alloc_len += 8;	/* correct for short ddp */

	if (alloc_len > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
		       dev->name, alloc_len);
		return -1;
	}

	if (lap_type == 0 || lap_type > 2) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n", dev->name, lap_type);
		return -1;
	}

	skb = dev_alloc_skb(3 + alloc_len);
	if (!skb) {
		printk("%s: dropping packet due to memory squeeze.\n",
		       dev->name);
		return -1;
	}
	skb->dev = dev;

	/* the short-DDP correction is satisfied out of headroom */
	if (alloc_len > data_len)
		skb_reserve(skb, 8);
	skb_put(skb, data_len + 3);
	skb->protocol = htons(ETH_P_LOCALTALK);

	/* add LLAP header */
	skb->data[0] = dst_node;
	skb->data[1] = src_node;
	skb->data[2] = lap_type;

	skb_reset_mac_header(skb);	/* save pointer to llap header */
	skb_pull(skb, 3);

	/* copy ddp(s,e)hdr + contents */
	skb_copy_to_linear_data(skb, ltdmabuf, data_len);

	skb_reset_transport_header(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* toss it onwards */
	netif_rx(skb);
	return 0;
}
示例11: au1000_rx
/*
 * Au1000 receive routine.
 *
 * Walks the receive DMA ring while descriptors are marked done, passing
 * each good frame to the stack and recycling every descriptor back to
 * the MAC.  Always returns 0.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;
	u32 frmlen;

	if (au1000_debug > 5)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* Remove FCS */
			skb = dev_alloc_skb(frmlen + 2);
			if (skb != NULL) {
				skb_reserve(skb, 2); /* 16 byte IP header align */
				skb_copy_to_linear_data(skb,
					(unsigned char *)pDB->vaddr, frmlen);
				skb_put(skb, frmlen);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb); /* pass the packet to upper layers */
			} else {
				/* Drop the frame, but fall through so the
				 * descriptor below is still recycled and the
				 * ring advances.  The previous code used
				 * "continue" here, which skipped the ring
				 * update and busy-looped on the same
				 * descriptor whenever allocation failed.
				 */
				printk(KERN_ERR
					"%s: Memory squeeze, dropping packet.\n",
					dev->name);
				dev->stats.rx_dropped++;
			}
		}
		else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
			}
		}
		/* give the descriptor back to the MAC and advance the ring */
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		au_sync();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
	}
	return 0;
}
示例12: vnic_rx_skb
/*
 * Build an skb for one receive completion and hand it to vnic_rx().
 *
 * Small packets (when vnic_linear_small_pkt is set) are copied entirely
 * into a freshly allocated linear skb; larger packets keep their payload
 * in the ring's page fragments and only the headers are copied into the
 * linear area.
 *
 * Returns the vnic_rx() result, -ENOMEM if no skb could be allocated, or
 * -ENODEV if the RX fragments could not be remapped.
 */
int vnic_rx_skb(struct vnic_login *login, struct vnic_rx_ring *ring,
		struct ib_wc *wc, int ip_summed, char *eth_hdr_va)
{
	u64 wr_id = (unsigned int)wc->wr_id;
	struct sk_buff *skb;
	int used_frags;
	char *va = eth_hdr_va;
	/* payload length excludes the EoIB header and any VLAN offset;
	 * "offest" (sic) is the headroom reserved in the new skb */
	int length = wc->byte_len - VNIC_EOIB_HDR_SIZE - VNIC_VLAN_OFFSET(login),
		linear_length = (length <= SMALL_PACKET_SIZE) ?
		length : SMALL_PACKET_SIZE, hdr_len = min(length, HEADER_COPY_SIZE),
		offest = NET_IP_ALIGN + 16;
	struct ib_device *ib_dev = login->port->dev->ca;

	/* alloc a small linear SKB */
	skb = alloc_skb(linear_length + offest, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_record_rx_queue(skb, ring->index);
	skb_reserve(skb, offest);

	if (vnic_linear_small_pkt && length <= SMALL_PACKET_SIZE) {
		u64 dma;

		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy
		 */
		dma = ring->rx_info[wr_id].dma_addr[0] + VNIC_EOIB_HDR_SIZE +
			VNIC_VLAN_OFFSET(login);
		ib_dma_sync_single_for_cpu(ib_dev, dma, length,
					   DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		ib_dma_sync_single_for_device(ib_dev, dma, length,
					      DMA_FROM_DEVICE);
		skb->tail += length;
	} else {
		/* unmap the needed fragment and reallocate them. Fragments that
		 * were not used will not be reused as is. */
		used_frags = vnic_unmap_and_replace_rx(ring, ib_dev,
						       skb_shinfo(skb)->frags,
						       wr_id, wc->byte_len);
		if (!used_frags)
			goto free_and_repost;
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, hdr_len);
		skb->tail += hdr_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset +=
			(VNIC_EOIB_HDR_SIZE + VNIC_VLAN_OFFSET(login) +
			 hdr_len);

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -=
			(VNIC_EOIB_HDR_SIZE + VNIC_VLAN_OFFSET(login) +
			 hdr_len);
		skb->data_len = length - hdr_len;
	}

	/* update skb fields */
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);
	skb->ip_summed = ip_summed;
	skb->dev = login->dev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	return vnic_rx(login, skb, wc);

free_and_repost:
	dev_kfree_skb(skb);
	return -ENODEV;
}
示例13: ipcomp_decompress
/*
 * Decompress the IPComp payload of @skb in place using the state's
 * per-CPU compression transform and scratch buffer.
 *
 * The first chunk of the decompressed data grows the linear part of the
 * skb (bounded by its tailroom); any remainder is attached as page
 * fragments.
 *
 * Returns 0 on success or a negative errno: the crypto error, -EINVAL if
 * the decompressed size is implausibly small, -EMSGSIZE when out of frag
 * slots, or -ENOMEM on page allocation failure.
 */
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();	/* pin: scratch and tfm are per-CPU */
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	/* output must exceed input plus the IPComp header it replaces */
	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	/* copy as much as fits into the skb's linear tailroom ... */
	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	/* ... and spill the remainder into newly allocated page frags */
	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}
示例14: error
/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
   and -1 on error (which may be packet drops or other errors).

   Resets the (recycled) mbuf to an empty state, copies the netmap buffer
   into it, and submits it to the driver via dev_queue_xmit(). */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	struct mbuf *m = a->m;
	struct ifnet *ifp = a->ifp;
	u_int len = a->len;
	netdev_tx_t ret;

	/* We know that the driver needs to prepend ifp->needed_headroom bytes
	 * to each packet to be transmitted. We then reset the mbuf pointers
	 * to the correct initial state:
	 *    ___________________________________________
	 *    ^           ^                              ^
	 *    |           |                              |
	 *   head        data                           end
	 *               tail
	 *
	 * which correspond to an empty buffer with exactly
	 * ifp->needed_headroom bytes between head and data.
	 */
	m->len = 0;
	m->data = m->head + ifp->needed_headroom;
	skb_reset_tail_pointer(m);
	skb_reset_mac_header(m);
	skb_reset_network_header(m);

	/* Copy a netmap buffer into the mbuf.
	 * TODO Support the slot flags (NS_MOREFRAG, NS_INDIRECT). */
	skb_copy_to_linear_data(m, a->addr, len); // skb_store_bits(m, 0, addr, len);
	skb_put(m, len);

	/* Hold a reference on this, we are going to recycle mbufs as
	 * much as possible. */
	NM_ATOMIC_INC(&m->users);

	/* On linux m->dev is not reliable, since it can be changed by the
	 * ndo_start_xmit() callback. This happens, for instance, with veth
	 * and bridge drivers. For this reason, the nm_os_generic_xmit_frame()
	 * implementation for linux stores a copy of m->dev into the
	 * destructor_arg field. */
	m->dev = ifp;
	skb_shinfo(m)->destructor_arg = m->dev;

	/* Tell generic_ndo_start_xmit() to pass this mbuf to the driver. */
	skb_set_queue_mapping(m, a->ring_nr);
	m->priority = a->qevent ? NM_MAGIC_PRIORITY_TXQE : NM_MAGIC_PRIORITY_TX;

	ret = dev_queue_xmit(m);

	if (unlikely(ret != NET_XMIT_SUCCESS)) {
		/* Reset priority, so that generic_netmap_tx_clean() can
		 * reclaim this mbuf. */
		m->priority = 0;

		/* Qdisc queue is full (this cannot happen with
		 * the netmap-aware qdisc, see exaplanation in
		 * netmap_generic_txsync), or qdisc is being
		 * deactivated. In the latter case dev_queue_xmit()
		 * does not call the enqueue method and returns
		 * NET_XMIT_DROP.
		 * If there is no carrier, the generic qdisc is
		 * not yet active (is pending in the qdisc_sleeping
		 * field), and so the temporary noop qdisc enqueue
		 * method will drop the packet and return NET_XMIT_CN.
		 */
		RD(3, "Warning: dev_queue_xmit() is dropping [%d]", ret);
		return -1;
	}

	return 0;
}
示例15: liquidio_push_packet
/** Routine to push packets arriving on Octeon interface upto network layer.
 * @param oct_id - octeon device id.
 * @param skbuff - skbuff struct to be passed to network layer.
 * @param len - size of total data received.
 * @param rh - Control header associated with the packet
 * @param param - additional control data with the packet
 * @param arg - farg registered in droq_ops
 *
 * NOTE(review): the function body is truncated in this view; the tail
 * (VLAN handling, final netif receive) is not shown here.
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
		container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			/* small frame: copy it entirely into the linear area
			 * and release the page */
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		/* offset of optional metadata carried in the data headers */
		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		/* strip the data headers before handing the frame upstream */
		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
//.........这里部分代码省略.........