This article collects typical usage examples of the C++ function skb_shinfo. If you have been struggling with questions like: what exactly does skb_shinfo do? how is it used? what does real code that calls it look like? — then the hand-picked code examples below may help.
The sections below show 15 code examples of the skb_shinfo function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
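Before the examples, a quick orientation: skb_shinfo() returns the struct skb_shared_info that lives at the end of an sk_buff's data area and records its paged fragments, fragment list, and segmentation-offload state. A minimal hedged sketch (ours, not taken from the examples below; field names follow mainline include/linux/skbuff.h):

#include <linux/skbuff.h>

/* Hedged sketch: walk the shared info of an skb and report its layout.
 * nr_frags, frags[], frag_list and gso_size are standard
 * skb_shared_info fields; pr_info() is ordinary kernel logging. */
static void inspect_skb_layout(struct sk_buff *skb)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    int i;

    pr_info("headlen=%u data_len=%u nr_frags=%u gso_size=%u\n",
            skb_headlen(skb), skb->data_len,
            shinfo->nr_frags, shinfo->gso_size);

    for (i = 0; i < shinfo->nr_frags; i++)
        pr_info("  frag %d: %u bytes\n", i,
                skb_frag_size(&shinfo->frags[i]));

    if (shinfo->frag_list)
        pr_info("  has frag_list (chained skbs)\n");
}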
Example 1: netvsc_start_xmit
//......... part of the code omitted here .........
        vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
        vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
                VLAN_PRIO_SHIFT;
    }

    if (skb_is_gso(skb)) {
        struct ndis_tcp_lso_info *lso_info;

        rndis_msg_size += NDIS_LSO_PPI_SIZE;
        ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
                    TCP_LARGESEND_PKTINFO);

        lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
                            ppi->ppi_offset);

        lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
        if (skb->protocol == htons(ETH_P_IP)) {
            lso_info->lso_v2_transmit.ip_version =
                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
            ip_hdr(skb)->tot_len = 0;
            ip_hdr(skb)->check = 0;
            tcp_hdr(skb)->check =
                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                           ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else {
            lso_info->lso_v2_transmit.ip_version =
                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
            ipv6_hdr(skb)->payload_len = 0;
            tcp_hdr(skb)->check =
                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                         &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }
        lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
        lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
    } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
        if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
            struct ndis_tcp_ip_checksum_info *csum_info;

            rndis_msg_size += NDIS_CSUM_PPI_SIZE;
            ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                        TCPIP_CHKSUM_PKTINFO);

            csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
                                 ppi->ppi_offset);

            csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

            if (skb->protocol == htons(ETH_P_IP)) {
                csum_info->transmit.is_ipv4 = 1;

                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                    csum_info->transmit.tcp_checksum = 1;
                else
                    csum_info->transmit.udp_checksum = 1;
            } else {
                csum_info->transmit.is_ipv6 = 1;

                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                    csum_info->transmit.tcp_checksum = 1;
                else
                    csum_info->transmit.udp_checksum = 1;
            }
        } else {
            /* Can't do offload of this type of checksum */
            if (skb_checksum_help(skb))
                goto drop;
Example 2: start_xmit
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct virtnet_info *vi = netdev_priv(dev);
    int num, err;
    struct scatterlist sg[1+MAX_SKB_FRAGS];
    struct virtio_net_hdr *hdr;
    const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

    sg_init_table(sg, 1+MAX_SKB_FRAGS);

    pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
         dest[0], dest[1], dest[2],
         dest[3], dest[4], dest[5]);

    /* Encode metadata header at front. */
    hdr = skb_vnet_hdr(skb);
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
        hdr->csum_start = skb->csum_start - skb_headroom(skb);
        hdr->csum_offset = skb->csum_offset;
    } else {
        hdr->flags = 0;
        hdr->csum_offset = hdr->csum_start = 0;
    }

    if (skb_is_gso(skb)) {
        hdr->hdr_len = skb_transport_header(skb) - skb->data;
        hdr->gso_size = skb_shinfo(skb)->gso_size;
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
            hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
        else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
            hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
        else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
            hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
        else
            BUG();
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
            hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
    } else {
        hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
        hdr->gso_size = hdr->hdr_len = 0;
    }

    vnet_hdr_to_sg(sg, skb);
    num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
    __skb_queue_head(&vi->send, skb);

again:
    /* Free up any pending old buffers before queueing new ones. */
    free_old_xmit_skbs(vi);
    err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
    if (err) {
        pr_debug("%s: virtio not prepared to send\n", dev->name);
        netif_stop_queue(dev);

        /* Activate callback for using skbs: if this returns false it
         * means some were used in the meantime. */
        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
            vi->svq->vq_ops->disable_cb(vi->svq);
            netif_start_queue(dev);
            goto again;
        }

        __skb_unlink(skb, &vi->send);
        return NETDEV_TX_BUSY;
    }
    vi->svq->vq_ops->kick(vi->svq);
    return 0;
}
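The branch that maps skb_shinfo(skb)->gso_type onto the virtio header is the part callers most often get wrong. A hedged sketch of the same mapping, pulled out into a helper of our own naming (skb_gso_to_virtio is not a kernel API; the VIRTIO_NET_HDR_GSO_* constants come from linux/virtio_net.h) that returns an error instead of calling BUG():

/* Hedged sketch: map skb_shinfo(skb)->gso_type to a virtio GSO type.
 * Mirrors the branch in start_xmit() above, but returns -EINVAL
 * instead of BUG() so a caller can drop the packet gracefully. */
static int skb_gso_to_virtio(struct sk_buff *skb, u8 *gso_type)
{
    if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
        *gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
    else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
        *gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
    else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
        *gso_type = VIRTIO_NET_HDR_GSO_UDP;
    else
        return -EINVAL;

    if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
        *gso_type |= VIRTIO_NET_HDR_GSO_ECN;
    return 0;
}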
Example 3: talitos_process
//......... part of the code omitted here .........
             crd1->crd_alg == CRYPTO_SHA1_HMAC ||
             crd1->crd_alg == CRYPTO_MD5 ||
             crd1->crd_alg == CRYPTO_SHA1) &&
            (crd2->crd_alg == CRYPTO_DES_CBC ||
             crd2->crd_alg == CRYPTO_3DES_CBC ||
             crd2->crd_alg == CRYPTO_AES_CBC ||
             crd2->crd_alg == CRYPTO_ARC4) &&
            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
            maccrd = crd1;
            enccrd = crd2;
        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
                crd1->crd_alg == CRYPTO_ARC4 ||
                crd1->crd_alg == CRYPTO_3DES_CBC ||
                crd1->crd_alg == CRYPTO_AES_CBC) &&
               (crd2->crd_alg == CRYPTO_MD5_HMAC ||
                crd2->crd_alg == CRYPTO_SHA1_HMAC ||
                crd2->crd_alg == CRYPTO_MD5 ||
                crd2->crd_alg == CRYPTO_SHA1) &&
               (crd1->crd_flags & CRD_F_ENCRYPT)) {
            enccrd = crd1;
            maccrd = crd2;
        } else {
            /* We cannot order the SEC as requested */
            printk("%s: cannot do the order\n",
                   device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
    }

    /* assign in_fifo and out_fifo based on input/output struct type */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        /* using SKB buffers */
        struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;

        if (skb_shinfo(skb)->nr_frags) {
            printk("%s: skb frags unimplemented\n",
                   device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
        td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
                              skb->len, DMA_TO_DEVICE);
        td->ptr[in_fifo].len = skb->len;
        td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
                               skb->len, DMA_TO_DEVICE);
        td->ptr[out_fifo].len = skb->len;
        td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
                            skb->len, DMA_TO_DEVICE);
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        /* using IOV buffers */
        struct uio *uiop = (struct uio *)crp->crp_buf;

        if (uiop->uio_iovcnt > 1) {
            printk("%s: iov frags unimplemented\n",
                   device_get_nameunit(sc->sc_cdev));
            err = EINVAL;
            goto errout;
        }
        td->ptr[in_fifo].ptr = dma_map_single(NULL,
            uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
        td->ptr[in_fifo].len = crp->crp_ilen;
        /* crp_olen is never set; always use crp_ilen */
        td->ptr[out_fifo].ptr = dma_map_single(NULL,
            uiop->uio_iov->iov_base,
            crp->crp_ilen, DMA_TO_DEVICE);
        td->ptr[out_fifo].len = crp->crp_ilen;
    } else {
        /* using contig buffers */
Example 4: wireless_send_event
//......... part of the code omitted here .........
     *
     * This padding exists because we manipulate event->u,
     * and 'event' is not packed.
     *
     * An iw_point event is laid out like this instead:
     * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
     * | event.len | event.cmd | p a d d i n g |
     * | iwpnt.len | iwpnt.flg | p a d d i n g |
     * | extra data ...
     *
     * The second padding exists because struct iw_point is extended,
     * but this depends on the platform...
     *
     * On 32-bit, all the padding shouldn't be there.
     */

    skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
    if (!skb)
        return;

    /* Send via the RtNetlink event channel */
    nlh = rtnetlink_ifinfo_prep(dev, skb);
    if (WARN_ON(!nlh)) {
        kfree_skb(skb);
        return;
    }

    /* Add the wireless events in the netlink packet */
    nla = nla_reserve(skb, IFLA_WIRELESS, event_len);
    if (!nla) {
        kfree_skb(skb);
        return;
    }
    event = nla_data(nla);

    /* Fill event - first clear to avoid data leaking */
    memset(event, 0, hdr_len);
    event->len = event_len;
    event->cmd = cmd;
    memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
    if (extra_len)
        memcpy(((char *) event) + hdr_len, extra, extra_len);

    nlmsg_end(skb, nlh);
#ifdef CONFIG_COMPAT
    hdr_len = compat_event_type_size[descr->header_type];
    event_len = hdr_len + extra_len;

    compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
    if (!compskb) {
        kfree_skb(skb);
        return;
    }

    /* Send via the RtNetlink event channel */
    nlh = rtnetlink_ifinfo_prep(dev, compskb);
    if (WARN_ON(!nlh)) {
        kfree_skb(skb);
        kfree_skb(compskb);
        return;
    }

    /* Add the wireless events in the netlink packet */
    nla = nla_reserve(compskb, IFLA_WIRELESS, event_len);
    if (!nla) {
        kfree_skb(skb);
        kfree_skb(compskb);
        return;
    }
    compat_event = nla_data(nla);

    compat_event->len = event_len;
    compat_event->cmd = cmd;
    if (descr->header_type == IW_HEADER_TYPE_POINT) {
        compat_wrqu.length = wrqu->data.length;
        compat_wrqu.flags = wrqu->data.flags;
        memcpy(&compat_event->pointer,
               ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
               hdr_len - IW_EV_COMPAT_LCP_LEN);
        if (extra_len)
            memcpy(((char *) compat_event) + hdr_len,
                   extra, extra_len);
    } else {
        /* extra_len must be zero, so no if (extra) needed */
        memcpy(&compat_event->pointer, wrqu,
               hdr_len - IW_EV_COMPAT_LCP_LEN);
    }

    nlmsg_end(compskb, nlh);

    skb_shinfo(skb)->frag_list = compskb;
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
    skb_queue_tail(&dev_net(dev)->wext_nlevents, skb);
    schedule_work(&wireless_nlevent_work);
#else
    skb_queue_tail(&wireless_nlevent_queue, skb);
    tasklet_schedule(&wireless_nlevent_tasklet);
#endif
}
Example 5: skb_head_from_pool
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
    struct sk_buff *n;

    n = skb_head_from_pool();
    if (!n) {
        n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
        if (!n)
            return NULL;
    }

#define C(x) n->x = skb->x

    n->next = n->prev = NULL;
    n->list = NULL;
    n->sk = NULL;
    C(stamp);
    C(dev);
    C(h);
    C(nh);
    C(mac);
    C(dst);
    dst_clone(n->dst);
    memcpy(n->cb, skb->cb, sizeof(skb->cb));
    C(len);
    C(data_len);
    C(csum);
    n->cloned = 1;
    C(pkt_type);
    C(ip_summed);
    C(priority);
    atomic_set(&n->users, 1);
    C(protocol);
    C(security);
    C(truesize);
    C(head);
    C(data);
    C(tail);
    C(end);
    n->destructor = NULL;
#ifdef CONFIG_NETFILTER
    C(nfmark);
    C(nfcache);
    C(nfct);
#ifdef CONFIG_NETFILTER_DEBUG
    C(nf_debug);
#endif
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI)
    C(private);
#endif
#ifdef CONFIG_NET_SCHED
    C(tc_index);
#endif

    atomic_inc(&(skb_shinfo(skb)->dataref));
    skb->cloned = 1;
#ifdef CONFIG_NETFILTER
    nf_conntrack_get(skb->nfct);
#endif

    return n;
}
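The cheapness of this clone comes from the last few lines: only the sk_buff header is copied, while atomic_inc(&skb_shinfo(skb)->dataref) records a second reference to the shared data area. A hedged sketch of the consequence for writers (our code; skb_cloned() and pskb_expand_head() are the standard unsharing APIs, shown with their modern signatures):

/* Hedged sketch: a clone shares payload with the original, so any
 * writer must first get a private copy.  skb_cloned() keys off the
 * cloned flag and shinfo->dataref set by skb_clone() above. */
static int write_safely(struct sk_buff *skb)
{
    if (skb_cloned(skb) &&
        pskb_expand_head(skb, 0, 0, GFP_ATOMIC))    /* unshare data */
        return -ENOMEM;

    /* now skb->data is private and may be modified */
    memset(skb->data, 0, min_t(unsigned int, 4, skb_headlen(skb)));
    return 0;
}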
Example 6: ip_frag_reasm
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
             struct net_device *dev)
{
    struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
    struct iphdr *iph;
    struct sk_buff *fp, *head = qp->q.fragments;
    int len;
    int ihlen;
    int err;
    int sum_truesize;
    u8 ecn;

    ipq_kill(qp);

    ecn = ip_frag_ecn_table[qp->ecn];
    if (unlikely(ecn == 0xff)) {
        err = -EINVAL;
        goto out_fail;
    }
    /* Make the one we just received the head. */
    if (prev) {
        head = prev->next;
        fp = skb_clone(head, GFP_ATOMIC);
        if (!fp)
            goto out_nomem;

        fp->next = head->next;
        if (!fp->next)
            qp->q.fragments_tail = fp;
        prev->next = fp;

        skb_morph(head, qp->q.fragments);
        head->next = qp->q.fragments->next;

        consume_skb(qp->q.fragments);
        qp->q.fragments = head;
    }

    WARN_ON(head == NULL);
    WARN_ON(FRAG_CB(head)->offset != 0);

    /* Allocate a new buffer for the datagram. */
    ihlen = ip_hdrlen(head);
    len = ihlen + qp->q.len;

    err = -E2BIG;
    if (len > 65535)
        goto out_oversize;

    /* Head of list must not be cloned. */
    if (skb_unclone(head, GFP_ATOMIC))
        goto out_nomem;

    /* If the first fragment is fragmented itself, we split
     * it to two chunks: the first with data and paged part
     * and the second, holding only fragments. */
    if (skb_has_frag_list(head)) {
        struct sk_buff *clone;
        int i, plen = 0;

        if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
            goto out_nomem;
        clone->next = head->next;
        head->next = clone;
        skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
        skb_frag_list_init(head);
        for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
            plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
        clone->len = clone->data_len = head->data_len - plen;
        head->data_len -= clone->len;
        head->len -= clone->len;
        clone->csum = 0;
        clone->ip_summed = head->ip_summed;
        add_frag_mem_limit(&qp->q, clone->truesize);
    }

    skb_push(head, head->data - skb_network_header(head));

    sum_truesize = head->truesize;
    for (fp = head->next; fp;) {
        bool headstolen;
        int delta;
        struct sk_buff *next = fp->next;

        sum_truesize += fp->truesize;
        if (head->ip_summed != fp->ip_summed)
            head->ip_summed = CHECKSUM_NONE;
        else if (head->ip_summed == CHECKSUM_COMPLETE)
            head->csum = csum_add(head->csum, fp->csum);

        if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
            kfree_skb_partial(fp, headstolen);
        } else {
            if (!skb_shinfo(head)->frag_list)
                skb_shinfo(head)->frag_list = fp;
            head->data_len += fp->len;
            head->len += fp->len;
            head->truesize += fp->truesize;
        }
        fp = next;
//......... part of the code omitted here .........
Example 7: xennet_make_frags
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                  struct xen_netif_tx_request *tx)
{
    struct netfront_info *np = netdev_priv(dev);
    char *data = skb->data;
    unsigned long mfn;
    RING_IDX prod = np->tx.req_prod_pvt;
    int frags = skb_shinfo(skb)->nr_frags;
    unsigned int offset = offset_in_page(data);
    unsigned int len = skb_headlen(skb);
    unsigned int id;
    grant_ref_t ref;
    int i;

    while (len > PAGE_SIZE - offset) {
        tx->size = PAGE_SIZE - offset;
        tx->flags |= XEN_NETTXF_more_data;
        len -= tx->size;
        data += tx->size;
        offset = 0;

        id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
        np->tx_skbs[id].skb = skb_get(skb);
        tx = RING_GET_REQUEST(&np->tx, prod++);
        tx->id = id;
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        BUG_ON((signed short)ref < 0);
        mfn = virt_to_mfn(data);
        gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                        mfn, GNTMAP_readonly);
        tx->gref = np->grant_tx_ref[id] = ref;
        tx->offset = offset;
        tx->size = len;
        tx->flags = 0;
    }

    for (i = 0; i < frags; i++) {
        skb_frag_t *frag = skb_shinfo(skb)->frags + i;

        tx->flags |= XEN_NETTXF_more_data;

        id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
        np->tx_skbs[id].skb = skb_get(skb);
        tx = RING_GET_REQUEST(&np->tx, prod++);
        tx->id = id;
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        BUG_ON((signed short)ref < 0);
        mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
        gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                        mfn, GNTMAP_readonly);
        tx->gref = np->grant_tx_ref[id] = ref;
        tx->offset = frag->page_offset;
        tx->size = skb_frag_size(frag);
        tx->flags = 0;
    }

    np->tx.req_prod_pvt = prod;
}
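Each pass of the while loop consumes one tx ring slot per page of linear data, and the for loop one per paged fragment. A hedged helper (our naming, not part of the driver) that pre-computes the slot count a given skb will need:

/* Hedged sketch: estimate the tx ring slots xennet_make_frags() will
 * consume -- one per page spanned by the linear area (including the
 * first request filled in by the caller) plus one per fragment. */
static int count_tx_slots(struct sk_buff *skb)
{
    unsigned int len = skb_headlen(skb);
    unsigned int offset = offset_in_page(skb->data);

    return DIV_ROUND_UP(offset + len, PAGE_SIZE) +
           skb_shinfo(skb)->nr_frags;
}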
Example 8: skb_copy_datagram_iovec
/**
 * skb_copy_datagram_iovec - Copy a datagram to an iovec.
 * @skb: buffer to copy
 * @offset: offset in the buffer to start copying from
 * @to: io vector to copy to
 * @len: amount of data to copy from buffer to iovec
 *
 * Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                struct iovec *to, int len)
{
    int start = skb_headlen(skb);
    int i, copy = start - offset;
    struct sk_buff *frag_iter;

    trace_skb_copy_datagram_iovec(skb, len);

    /* Copy header. */
    if (copy > 0) {
        if (copy > len)
            copy = len;
        if (memcpy_toiovec(to, skb->data + offset, copy))
            goto fault;
        if ((len -= copy) == 0)
            return 0;
        offset += copy;
    }

    /* Copy paged appendix. Hmm... why does this look so complicated? */
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        int end;

        WARN_ON(start > offset + len);

        end = start + skb_shinfo(skb)->frags[i].size;
        if ((copy = end - offset) > 0) {
            int err;
            u8 *vaddr;
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            struct page *page = frag->page;

            if (copy > len)
                copy = len;
            vaddr = kmap(page);
            err = memcpy_toiovec(to, vaddr + frag->page_offset +
                         offset - start, copy);
            kunmap(page);
            if (err)
                goto fault;
            if (!(len -= copy))
                return 0;
            offset += copy;
        }
        start = end;
    }

    skb_walk_frags(skb, frag_iter) {
        int end;

        WARN_ON(start > offset + len);

        end = start + frag_iter->len;
        if ((copy = end - offset) > 0) {
            if (copy > len)
                copy = len;
            if (skb_copy_datagram_iovec(frag_iter,
                            offset - start,
                            to, copy))
                goto fault;
            if ((len -= copy) == 0)
                return 0;
            offset += copy;
        }
        start = end;
    }
Example 9: kmem_cache_alloc
/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @fclone: allocate from fclone cache instead of head cache
 *          and allocate a cloned (child) skb
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of size bytes. The object has a reference count of one.
 * The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                int fclone)
{
    kmem_cache_t *cache;
    struct skb_shared_info *shinfo;
    struct sk_buff *skb;
    u8 *data;

    cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

    /* Get the HEAD */
    skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
    if (!skb)
        goto out;

    /* Get the DATA. Size must match skb_add_mtu(). */
    size = SKB_DATA_ALIGN(size);
    //data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
    //data = kmalloc(size + sizeof(struct skb_shared_info)+4, gfp_mask);//incifer support vlan id
    /* 2007/10/16 pppoe acc incifer LAN2WAN */
    data = kmalloc(size + sizeof(struct skb_shared_info) + NK_EXTRA_OFFSET, gfp_mask);
    if (!data)
        goto nodata;

    memset(skb, 0, offsetof(struct sk_buff, truesize));
    skb->truesize = size + sizeof(struct sk_buff);
    atomic_set(&skb->users, 1);
    skb->head = data;
    //skb->data = data+4;//incifer
    /* 2007/10/16 pppoe acc incifer LAN2WAN */
    skb->data = data + NK_EXTRA_OFFSET;
    //skb->tail = data+4;//incifer
    /* 2007/10/16 pppoe acc incifer LAN2WAN */
    skb->tail = data + NK_EXTRA_OFFSET;
    //skb->end = data + size+4;//incifer
    /* 2007/10/16 pppoe acc incifer LAN2WAN */
    skb->end = data + size + NK_EXTRA_OFFSET;

    /* make sure we initialize shinfo sequentially */
    shinfo = skb_shinfo(skb);
    atomic_set(&shinfo->dataref, 1);
    shinfo->nr_frags = 0;
    shinfo->tso_size = 0;
    shinfo->tso_segs = 0;
    shinfo->ufo_size = 0;
    shinfo->ip6_frag_id = 0;
    shinfo->frag_list = NULL;

    if (fclone) {
        struct sk_buff *child = skb + 1;
        atomic_t *fclone_ref = (atomic_t *) (child + 1);

        skb->fclone = SKB_FCLONE_ORIG;
        atomic_set(fclone_ref, 1);

        child->fclone = SKB_FCLONE_UNAVAILABLE;
    }
out:
    return skb;
nodata:
    kmem_cache_free(cache, skb);
    skb = NULL;
    goto out;
}
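Note that the allocator initializes every skb_shared_info field itself, via skb_shinfo(skb), before anyone else can see the buffer (the tso_size/tso_segs names date this tree to early 2.6; mainline later renamed them gso_size/gso_segs). A hedged sketch of a typical caller going through the standard alloc_skb() wrapper:

/* Hedged sketch: ordinary caller of the allocator.  Headroom is
 * reserved by the caller with skb_reserve(); the allocator itself
 * returns a buffer with no headroom and empty shared info. */
static struct sk_buff *build_empty_packet(unsigned int payload)
{
    struct sk_buff *skb = alloc_skb(payload + NET_IP_ALIGN, GFP_ATOMIC);

    if (!skb)
        return NULL;
    skb_reserve(skb, NET_IP_ALIGN);    /* align the IP header */

    /* freshly allocated: no fragments, not cloned */
    BUG_ON(skb_shinfo(skb)->nr_frags != 0);
    return skb;
}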
Example 10: is_gre_gso
static bool is_gre_gso(struct sk_buff *skb)
{
    return skb_shinfo(skb)->gso_type &
        (SKB_GSO_GRE | SKB_GSO_GRE_CSUM);
}
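Since gso_type is a bitmask, this test composes directly with skb_is_gso(). A hedged usage sketch (tx_prepare and handle_gre_segmentation are hypothetical driver-side names, not kernel APIs):

/* Hedged usage sketch -- handle_gre_segmentation() is a hypothetical
 * driver-specific helper, not a kernel API. */
static void handle_gre_segmentation(struct sk_buff *skb);    /* hypothetical */

static void tx_prepare(struct sk_buff *skb)
{
    /* only GSO skbs carrying GRE tunnel segments need the fixup */
    if (skb_is_gso(skb) && is_gre_gso(skb))
        handle_gre_segmentation(skb);
}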
Example 11: cp_start_xmit
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
    struct cp_private *cp = netdev_priv(dev);
    unsigned entry;
    u32 eor;
#if CP_VLAN_TAG_USED
    u32 vlan_tag = 0;
#endif

    spin_lock_irq(&cp->lock);

    /* This is a hard error, log it. */
    if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
        netif_stop_queue(dev);
        spin_unlock_irq(&cp->lock);
        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
               dev->name);
        return 1;
    }

#if CP_VLAN_TAG_USED
    if (cp->vlgrp && vlan_tx_tag_present(skb))
        vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

    entry = cp->tx_head;
    eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
    if (skb_shinfo(skb)->nr_frags == 0) {
        struct cp_desc *txd = &cp->tx_ring[entry];
        u32 len;
        dma_addr_t mapping;

        len = skb->len;
        mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        CP_VLAN_TX_TAG(txd, vlan_tag);
        txd->addr = cpu_to_le64(mapping);
        wmb();

        if (skb->ip_summed == CHECKSUM_HW) {
            const struct iphdr *ip = skb->nh.iph;
            if (ip->protocol == IPPROTO_TCP)
                txd->opts1 = cpu_to_le32(eor | len | DescOwn |
                             FirstFrag | LastFrag |
                             IPCS | TCPCS);
            else if (ip->protocol == IPPROTO_UDP)
                txd->opts1 = cpu_to_le32(eor | len | DescOwn |
                             FirstFrag | LastFrag |
                             IPCS | UDPCS);
            else
                BUG();
        } else
            txd->opts1 = cpu_to_le32(eor | len | DescOwn |
                         FirstFrag | LastFrag);
        wmb();

        cp->tx_skb[entry].skb = skb;
        cp->tx_skb[entry].mapping = mapping;
        cp->tx_skb[entry].frag = 0;
        entry = NEXT_TX(entry);
    } else {
        struct cp_desc *txd;
        u32 first_len, first_eor;
        dma_addr_t first_mapping;
        int frag, first_entry = entry;
        const struct iphdr *ip = skb->nh.iph;

        /* We must give this initial chunk to the device last.
         * Otherwise we could race with the device.
         */
        first_eor = eor;
        first_len = skb_headlen(skb);
        first_mapping = pci_map_single(cp->pdev, skb->data,
                           first_len, PCI_DMA_TODEVICE);
        cp->tx_skb[entry].skb = skb;
        cp->tx_skb[entry].mapping = first_mapping;
        cp->tx_skb[entry].frag = 1;
        entry = NEXT_TX(entry);

        for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
            skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
            u32 len;
            u32 ctrl;
            dma_addr_t mapping;

            len = this_frag->size;
            mapping = pci_map_single(cp->pdev,
                         ((void *) page_address(this_frag->page) +
                          this_frag->page_offset),
                         len, PCI_DMA_TODEVICE);
            eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
            if (skb->ip_summed == CHECKSUM_HW) {
                ctrl = eor | len | DescOwn | IPCS;
                if (ip->protocol == IPPROTO_TCP)
                    ctrl |= TCPCS;
                else if (ip->protocol == IPPROTO_UDP)
                    ctrl |= UDPCS;
                else
                    BUG();
            } else
//......... part of the code omitted here .........
Example 12: atomic_inc
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
    struct sk_buff *n;

    n = skb + 1;
    if (skb->fclone == SKB_FCLONE_ORIG &&
        n->fclone == SKB_FCLONE_UNAVAILABLE) {
        atomic_t *fclone_ref = (atomic_t *) (n + 1);
        n->fclone = SKB_FCLONE_CLONE;
        atomic_inc(fclone_ref);
    } else {
        n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
        if (!n)
            return NULL;
        n->fclone = SKB_FCLONE_UNAVAILABLE;
    }

#define C(x) n->x = skb->x

    n->next = n->prev = NULL;
    n->sk = NULL;
    C(tstamp);
    C(dev);
    C(h);
    C(nh);
    C(mac);
    C(dst);
    dst_clone(skb->dst);
    C(sp);
#ifdef CONFIG_INET
    secpath_get(skb->sp);
#endif
    memcpy(n->cb, skb->cb, sizeof(skb->cb));
    C(len);
    C(data_len);
    C(csum);
    C(local_df);
    n->cloned = 1;
    n->nohdr = 0;
    C(pkt_type);
    C(ip_summed);
    C(priority);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
    C(ipvs_property);
#endif
    C(protocol);
    n->destructor = NULL;
#ifdef CONFIG_NETFILTER
    C(nfmark);
    C(nfct);
    nf_conntrack_get(skb->nfct);
    C(nfctinfo);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    C(nfct_reasm);
    nf_conntrack_get_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
    C(nf_bridge);
    nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#ifdef CONFIG_NET_SCHED
    C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
    n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
    n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
    n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
    C(input_dev);
#endif
#endif
    C(truesize);
    atomic_set(&n->users, 1);
    C(head);
    C(data);
    C(tail);
    C(end);

    atomic_inc(&(skb_shinfo(skb)->dataref));
    skb->cloned = 1;

    return n;
}
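The n = skb + 1 fast path only fires for buffers that were allocated with alloc_skb_fclone(), which is why TCP uses that allocator for transmit skbs: nearly every one is cloned for the retransmit queue, and the companion slot avoids a second slab allocation. A hedged sketch (queue_for_retransmit is a hypothetical placeholder, not a kernel API):

/* Hedged sketch -- queue_for_retransmit() is hypothetical bookkeeping. */
static void queue_for_retransmit(struct sk_buff *skb);    /* hypothetical */

/* The caller is assumed to have allocated 'skb' with
 * alloc_skb_fclone(), so this skb_clone() takes the fast path above. */
static int send_with_retransmit_copy(struct sk_buff *skb)
{
    struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

    if (!clone)
        return -ENOMEM;
    queue_for_retransmit(clone);    /* keep a copy until acked */
    return dev_queue_xmit(skb);
}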
Example 13: skb_put_bits
/** Copy some data bits from a kernel buffer to an skb.
 * Derived in the obvious way from skb_copy_bits().
 */
int skb_put_bits(const struct sk_buff *skb, int offset, void *src, int len)
{
    int i, copy;
    int start = skb->len - skb->data_len;

    if (offset > (int)skb->len - len)
        goto fault;

    /* Copy header. */
    if ((copy = start - offset) > 0) {
        if (copy > len)
            copy = len;
        memcpy(skb->data + offset, src, copy);
        if ((len -= copy) == 0)
            return 0;
        offset += copy;
        src += copy;
    }

#ifdef __KERNEL__
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        int end;

        BUG_TRAP(start <= offset + len);

        end = start + skb_shinfo(skb)->frags[i].size;
        if ((copy = end - offset) > 0) {
            u8 *vaddr;

            if (copy > len)
                copy = len;

            vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
            memcpy(vaddr + skb_shinfo(skb)->frags[i].page_offset + offset - start,
                   src,
                   copy);
            kunmap_skb_frag(vaddr);

            if ((len -= copy) == 0)
                return 0;
            offset += copy;
            src += copy;
        }
        start = end;
    }

    if (skb_shinfo(skb)->frag_list) {
        struct sk_buff *list;

        for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
            int end;

            BUG_TRAP(start <= offset + len);

            end = start + list->len;
            if ((copy = end - offset) > 0) {
                if (copy > len)
                    copy = len;
                if (skb_put_bits(list, offset - start, src, copy))
                    goto fault;
                if ((len -= copy) == 0)
                    return 0;
                offset += copy;
                src += copy;
            }
            start = end;
        }
    }
#else
    i = 0;
#endif

    if (len == 0)
        return 0;

fault:
    return -EFAULT;
}
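As the comment says, skb_put_bits() is the write-direction mirror of skb_copy_bits(), so it can patch bytes that live in paged fragments. A hedged caller sketch (patch_payload is our naming; the skb_cloned() guard is the caller's responsibility, since skb_put_bits() itself does not check for shared data):

/* Hedged sketch: overwrite 'len' bytes at 'offset' in a possibly
 * fragmented skb from a kernel buffer, refusing if the data area is
 * shared with a clone (writing would corrupt the other reference). */
static int patch_payload(struct sk_buff *skb, int offset,
             const void *buf, int len)
{
    if (skb_cloned(skb))
        return -EBUSY;
    return skb_put_bits(skb, offset, (void *)buf, len);
}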
Example 14: xennet_alloc_rx_buffers
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
    unsigned short id;
    struct netfront_info *np = netdev_priv(dev);
    struct sk_buff *skb;
    struct page *page;
    int i, batch_target, notify;
    RING_IDX req_prod = np->rx.req_prod_pvt;
    grant_ref_t ref;
    unsigned long pfn;
    void *vaddr;
    struct xen_netif_rx_request *req;

    if (unlikely(!netif_carrier_ok(dev)))
        return;

    batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
        skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
                     GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
            goto no_skb;

        skb_reserve(skb, NET_IP_ALIGN);

        page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
        if (!page) {
            kfree_skb(skb);
no_skb:
            if (i != 0)
                goto refill;
            mod_timer(&np->rx_refill_timer,
                  jiffies + (HZ/10));
            break;
        }

        __skb_fill_page_desc(skb, 0, page, 0, 0);
        skb_shinfo(skb)->nr_frags = 1;
        __skb_queue_tail(&np->rx_batch, skb);
    }

    if (i < (np->rx_target/2)) {
        if (req_prod > np->rx.sring->req_prod)
            goto push;
        return;
    }

    if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
        ((np->rx_target *= 2) > np->rx_max_target))
        np->rx_target = np->rx_max_target;

refill:
    for (i = 0; ; i++) {
        skb = __skb_dequeue(&np->rx_batch);
        if (skb == NULL)
            break;

        skb->dev = dev;

        id = xennet_rxidx(req_prod + i);

        BUG_ON(np->rx_skbs[id]);
        np->rx_skbs[id] = skb;

        ref = gnttab_claim_grant_reference(&np->gref_rx_head);
        BUG_ON((signed short)ref < 0);
        np->grant_rx_ref[id] = ref;

        pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
        vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

        req = RING_GET_REQUEST(&np->rx, req_prod + i);
        gnttab_grant_foreign_access_ref(ref,
                        np->xbdev->otherend_id,
                        pfn_to_mfn(pfn),
                        0);

        req->id = id;
        req->gref = ref;
    }

    wmb();
    np->rx.req_prod_pvt = req_prod + i;

push:
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
    if (notify)
        notify_remote_via_irq(np->netdev->irq);
}
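The interesting skb_shinfo() detail here is that the driver attaches the freshly allocated page as fragment 0 and bumps nr_frags by hand. A hedged sketch of the same attachment using the mainline skb_add_rx_frag() helper, which also fixes up len, data_len and truesize (assuming a kernel recent enough to have the truesize argument):

/* Hedged sketch: attach one page of received data as fragment 0,
 * the state xennet_alloc_rx_buffers() sets up manually above. */
static int attach_rx_page(struct sk_buff *skb, struct page *page,
              unsigned int size)
{
    skb_add_rx_frag(skb, 0, page, 0, size, PAGE_SIZE);
    return skb_shinfo(skb)->nr_frags;    /* now 1 */
}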
Example 15: skb_scatterlist
/** Convert a (possibly fragmented) skb into a scatter list.
 *
 * @param skb     skb to convert
 * @param sg      scatterlist to set up
 * @param sg_n    size of sg on input, number of elements set on output
 * @param offset  offset into data to start at
 * @param len     number of bytes
 * @return 0 on success, error code otherwise
 */
int skb_scatterlist(struct sk_buff *skb, struct scatterlist *sg, int *sg_n,
            int offset, int len){
    int err = 0;
    int start;     // No. of bytes copied so far (where next copy starts).
    int size;      // Size of the next chunk.
    int end;       // Where the next chunk ends (start + size).
    int copy;      // Number of bytes to copy in one operation.
    int sg_i = 0;  // Index into sg.
    int i;

    if(DEBUG_SCATTERLIST){
        dprintf("> offset=%d len=%d (end=%d), skb len=%d,\n",
            offset, len, offset+len, skb->len);
    }
    start = 0;
    size = skb_headlen(skb);
    end = start + size;
    copy = end - offset;
    if(copy > 0){
        char *p;
        if(copy > len) copy = len;
        if(sg_i >= *sg_n){
            err = -EINVAL;
            goto exit;
        }
        p = skb->data + offset;
        SET_SCATTER_ADDR(sg[sg_i], NULL);
        sg[sg_i].page = virt_to_page(p);
        sg[sg_i].offset = ((unsigned long)p & ~PAGE_MASK);
        sg[sg_i].length = copy;
        if(DEBUG_SCATTERLIST){
            dprintf("> sg_i=%d .page=%p .offset=%u .length=%d\n",
                sg_i, sg[sg_i].page, sg[sg_i].offset, sg[sg_i].length);
        }
        sg_i++;
        if((len -= copy) == 0) goto exit;
        offset += copy;
    }
    start = end;
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++){
        BUG_TRAP(start <= offset + len);
        size = skb_shinfo(skb)->frags[i].size;
        end = start + size;
        copy = end - offset;
        if(copy > 0){
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            if(copy > len) copy = len;
            if(sg_i >= *sg_n){
                err = -EINVAL;
                goto exit;
            }
            SET_SCATTER_ADDR(sg[sg_i], NULL);
            sg[sg_i].page = frag->page;
            sg[sg_i].offset = frag->page_offset + offset - start;
            sg[sg_i].length = copy;
            if(DEBUG_SCATTERLIST){
                dprintf("> sg_i=%d .page=%p .offset=%u .length=%d\n",
                    sg_i, sg[sg_i].page, sg[sg_i].offset, sg[sg_i].length);
            }
            sg_i++;
            if((len -= copy) == 0) goto exit;
            offset += copy;
        }
        start = end;
    }
exit:
    if(!err) *sg_n = sg_i;
    if(len) wprintf("> len=%d\n", len);
    if(len) BUG();
    if(err) dprintf("< err=%d sg_n=%d\n", err, *sg_n);
    return err;
}
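A hedged caller sketch for skb_scatterlist(): sg_n is in/out exactly as the comment above documents, and start_dma_transfer() is a hypothetical engine hook, not a kernel API:

/* Hedged sketch -- start_dma_transfer() is a hypothetical DMA hook. */
static int start_dma_transfer(struct scatterlist *sg, int n);    /* hypothetical */

/* Build a scatterlist over the first 'len' payload bytes of an skb
 * and hand it to a DMA-capable engine. */
static int map_skb_for_dma(struct sk_buff *skb, int len)
{
    struct scatterlist sg[MAX_SKB_FRAGS + 1];
    int sg_n = MAX_SKB_FRAGS + 1;    /* in: capacity, out: entries used */
    int err = skb_scatterlist(skb, sg, &sg_n, 0, len);

    if (err)
        return err;
    return start_dma_transfer(sg, sg_n);
}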