本文整理汇总了C++中skb_checksum_help函数的典型用法代码示例。如果您正苦于以下问题:C++ skb_checksum_help函数的具体用法?C++ skb_checksum_help怎么用?C++ skb_checksum_help使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了skb_checksum_help函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: xfrm6_output_one
/* Apply the pending chain of xfrm transforms to an outgoing IPv6 packet.
 *
 * Any deferred hardware checksum (CHECKSUM_PARTIAL) is resolved up front,
 * because the transforms below will encrypt/rewrite the payload and a
 * deferred checksum could no longer be computed afterwards.  The loop then
 * walks the dst/xfrm bundle, running each state's mode and type output
 * handlers under the per-state lock, until the bundle ends or a
 * tunnel-mode state is reached.
 *
 * Returns 0 on success or a negative errno; the skb is consumed (freed)
 * on any failure.
 *
 * NOTE(review): skb->dst, skb->nh.raw and xtime date this to an old
 * (~2.6.x) kernel API — confirm target kernel before reuse.
 */
static int xfrm6_output_one(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
int err;
/* Finish any deferred checksum before the payload is transformed. */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (err)
goto error_nolock;
}
/* Tunnel mode adds an outer header; verify the packet still fits. */
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = xfrm6_tunnel_check_size(skb);
if (err)
goto error_nolock;
}
do {
/* Per-state lock guards lifetime counters and state checks. */
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, skb);
if (err)
goto error;
err = x->mode->output(x, skb);
if (err)
goto error;
err = x->type->output(x, skb);
if (err)
goto error;
/* Account bytes/packets against the state's lifetime limits. */
x->curlft.bytes += skb->len;
x->curlft.packets++;
if (x->props.mode == XFRM_MODE_ROUTEOPTIMIZATION)
x->lastused = (u64)xtime.tv_sec;
spin_unlock_bh(&x->lock);
skb->nh.raw = skb->data;
/* Advance to the next dst in the bundle; the bundle owning no
 * further entry is an error for this caller. */
if (!(skb->dst = dst_pop(dst))) {
err = -EHOSTUNREACH;
goto error_nolock;
}
dst = skb->dst;
x = dst->xfrm;
/* Stop before a tunnel-mode state: the caller re-enters output
 * processing for the new outer packet. */
} while (x && (x->props.mode != XFRM_MODE_TUNNEL));
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
err = 0;
out_exit:
return err;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
goto out_exit;
}
示例2: skb_reset_inner_headers
/*
 * Prepare an skb for GRE transmission with respect to GSO and checksum
 * offload.  Returns the (possibly modified) skb on success, or an
 * ERR_PTR on failure, in which case the skb has been freed.
 */
struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
	int ret = 0;

	/* Record the inner headers once, unless an inner tunnel already did. */
	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		/* Segmentation happens later; just tag the skb as GRE GSO. */
		ret = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(ret))
			goto drop;
		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
		return skb;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Resolve the deferred checksum only if GRE carries one. */
		if (gre_csum) {
			ret = skb_checksum_help(skb);
			if (unlikely(ret))
				goto drop;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;

drop:
	kfree_skb(skb);
	return ERR_PTR(ret);
}
示例3: rpl_ip6_local_out
/*
 * OVS compat replacement for ip6_local_out(): software-segments GSO
 * packets (so the per-segment fixup callback can run) and resolves any
 * deferred checksum on non-GSO packets before handing off to output_ipv6().
 */
int rpl_ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* No fixup registered: the stack can handle this packet as-is. */
	if (!OVS_GSO_CB(skb)->fix_segment)
		return output_ipv6(skb);

	if (skb_is_gso(skb)) {
		int rc = NET_XMIT_DROP;

		skb = tnl_skb_gso_segment(skb, 0, false, AF_INET6);
		if (!skb || IS_ERR(skb))
			return NET_XMIT_DROP;

		/* Transmit each segment; report the status of the last. */
		while (skb) {
			struct sk_buff *rest = skb->next;

			skb->next = NULL;
			rc = output_ipv6(skb);
			skb = rest;
		}
		return rc;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    unlikely(skb_checksum_help(skb)))
		return NET_XMIT_DROP;

	return output_ipv6(skb);
}
示例4: xfrm_output
/* Entry point for xfrm (IPsec) transmit processing.
 *
 * If the state supports hardware offload, a sec_path entry referencing
 * the state is attached so the driver can locate it, and software
 * transform work is skipped where the hardware covers it.  Otherwise
 * GSO packets are routed through the GSO path, and a deferred
 * CHECKSUM_PARTIAL is resolved in software before encryption (which
 * would invalidate it).
 *
 * Returns the result of xfrm_output2() on success paths, or a negative
 * errno with the skb freed and LINUX_MIB_XFRMOUTERROR bumped on failure.
 */
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
struct xfrm_state *x = skb_dst(skb)->xfrm;
int err;
secpath_reset(skb);
if (xfrm_dev_offload_ok(skb, x)) {
struct sec_path *sp;
/* Duplicate (or create) the sec_path before dropping any old
 * reference — order matters for refcounting. */
sp = secpath_dup(skb->sp);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return -ENOMEM;
}
if (skb->sp)
secpath_put(skb->sp);
skb->sp = sp;
skb->encapsulation = 1;
sp->olen++;
/* Record the offloaded state and take a reference for the
 * driver's benefit. */
sp->xvec[skb->sp->len++] = x;
xfrm_state_hold(x);
if (skb_is_gso(skb)) {
skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
return xfrm_output2(net, sk, skb);
}
/* Hardware computes the ESP TX checksum: skip software help. */
if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
goto out;
}
if (skb_is_gso(skb))
return xfrm_output_gso(net, sk, skb);
/* Resolve deferred checksum before the payload is encrypted. */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return err;
}
}
out:
return xfrm_output2(net, sk, skb);
}
示例5: ovs_iptunnel_handle_offloads
/* Prepare an skb for IP-tunnel transmission across kernel versions.
 *
 * Handles three concerns: marking inner headers for encapsulation-aware
 * hardware, tagging GSO packets with the tunnel's gso_type, and resolving
 * a deferred checksum (CHECKSUM_PARTIAL) when the caller requests it.
 * The LINUX_VERSION_CODE branches adapt to kernels with/without
 * skb->encapsulation (3.8+) and the compat fix_segment hook (<3.18).
 *
 * Returns 0 on success or a negative errno; on error the skb is NOT
 * freed here — the caller owns cleanup.
 */
int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
bool csum_help, int gso_type_mask,
void (*fix_segment)(struct sk_buff *))
{
int err;
if (likely(!skb_is_encapsulated(skb))) {
skb_reset_inner_headers(skb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
skb->encapsulation = 1;
#endif
} else if (skb_is_gso(skb)) {
/* Nested encapsulation of a GSO packet is not supported. */
err = -ENOSYS;
goto error;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
/* Pre-3.18 compat GSO: only install the segment-fixup hook when the
 * kernel itself won't handle the tunnel gso_type. */
if (gso_type_mask)
fix_segment = NULL;
OVS_GSO_CB(skb)->fix_segment = fix_segment;
#endif
if (skb_is_gso(skb)) {
err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
goto error;
skb_shinfo(skb)->gso_type |= gso_type_mask;
return 0;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
/* If packet is not gso and we are resolving any partial checksum,
 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
 * on the outer header without confusing devices that implement
 * NETIF_F_IP_CSUM with encapsulation.
 */
if (csum_help)
skb->encapsulation = 0;
#endif
if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
err = skb_checksum_help(skb);
if (unlikely(err))
goto error;
} else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
return 0;
error:
return err;
}
示例6: rtw_set_tx_chksum_offload
/* Decide between hardware and software TX checksum for a Realtek wifi
 * frame.
 *
 * For unfragmented TCP packets the checksum is delegated to hardware
 * (pattrib->hw_tcp_csum = 1); UDP and fragmented packets fall back to
 * skb_checksum_help() in software.  No-op unless the driver is built
 * with CONFIG_TCP_CSUM_OFFLOAD_TX.
 *
 * NOTE(review): only IPv4 headers are inspected (ip_hdr) — presumably
 * IPv6 traffic never reaches here with CHECKSUM_PARTIAL; verify against
 * the caller.
 */
void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib)
{
#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
struct sk_buff *skb = (struct sk_buff *)pkt;
pattrib->hw_tcp_csum = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* Hardware offload only handles linear (unfragmented) packets. */
if (skb_shinfo(skb)->nr_frags == 0)
{
const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP) {
// TCP checksum offload by HW
DBG_871X("CHECKSUM_PARTIAL TCP\n");
pattrib->hw_tcp_csum = 1;
//skb_checksum_help(skb);
} else if (ip->protocol == IPPROTO_UDP) {
//DBG_871X("CHECKSUM_PARTIAL UDP\n");
#if 1
/* Compute the UDP checksum in software. */
skb_checksum_help(skb);
#else
/* Disabled alternative: zero the UDP checksum so the receiver
 * skips verification entirely. */
// Set UDP checksum = 0 to skip checksum check
struct udphdr *udp = skb_transport_header(skb);
udp->check = 0;
#endif
} else {
/* CHECKSUM_PARTIAL on a protocol we cannot offload. */
DBG_871X("%s-%d TCP CSUM offload Error!!\n", __FUNCTION__, __LINE__);
WARN_ON(1); /* we need a WARN() */
}
}
else { // IP fragmentation case
DBG_871X("%s-%d nr_frags != 0, using skb_checksum_help(skb);!!\n", __FUNCTION__, __LINE__);
skb_checksum_help(skb);
}
}
#endif
}
示例7: void
/*
 * OVS compat tunnel-offload preparation.  GSO packets get the
 * per-segment fixup callback installed (nested encapsulation is
 * rejected); non-GSO packets have their deferred checksum resolved when
 * csum_help is set.  Returns the skb, or an ERR_PTR after freeing it.
 */
struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
					     bool csum_help,
					     void (*fix_segment)(struct sk_buff *))
{
	int rc = 0;

	/* XXX: inner-header reset and the encapsulation bit are managed
	 * elsewhere in this compat path; see the non-compat variant.
	 */
	if (skb_is_gso(skb)) {
		if (skb_is_encapsulated(skb)) {
			rc = -ENOSYS;
			goto fail;
		}
		/* Checksum fixup is deferred to segmentation time. */
		OVS_GSO_CB(skb)->fix_segment = fix_segment;
		return skb;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (csum_help) {
			rc = skb_checksum_help(skb);
			if (unlikely(rc))
				goto fail;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;

fail:
	kfree_skb(skb);
	return ERR_PTR(rc);
}
示例8: rpl_iptunnel_handle_offloads
/* Compat tunnel-offload preparation, dual-signature across kernel
 * versions.
 *
 * NOTE(review): the #endif just below closes an #if that begins above
 * this excerpt — an alternate (pre-4.7, sk_buff*-returning) signature
 * is selected there, which is why the <4.7 exit path returns skb /
 * ERR_PTR while the >=4.7 path returns an int.  Confirm against the
 * full file before modifying.
 */
int rpl_iptunnel_handle_offloads(struct sk_buff *skb,
bool csum_help,
int gso_type_mask)
#endif
{
int err;
/* Mark inner headers once, unless an inner tunnel already did. */
if (likely(!skb->encapsulation)) {
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
}
if (skb_is_gso(skb)) {
/* Segmentation happens later; just tag the gso_type. */
err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
goto error;
skb_shinfo(skb)->gso_type |= gso_type_mask;
goto out;
}
/* If packet is not gso and we are resolving any partial checksum,
 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
 * on the outer header without confusing devices that implement
 * NETIF_F_IP_CSUM with encapsulation.
 */
if (csum_help)
skb->encapsulation = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
err = skb_checksum_help(skb);
if (unlikely(err))
goto error;
} else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
out:
return skb;
error:
kfree_skb(skb);
return ERR_PTR(err);
#else
out:
error:
return 0;
#endif
}
示例9: rpl_ip_local_out
/* Compat replacement for ip_local_out() that software-segments GSO
 * packets and preserves consecutive IP IDs across the segments.
 *
 * Non-GSO packets with a deferred checksum are resolved via
 * skb_checksum_help() first.  Returns NETDEV_TX_OK, or the last
 * failing ip_local_out() status when any segment's transmit fails
 * (0 is returned when segmentation/checksum prep itself fails —
 * the skb is consumed either way).
 */
int rpl_ip_local_out(struct sk_buff *skb)
{
int ret = NETDEV_TX_OK;
/* id < 0 means "do not rewrite IP IDs" (non-GSO path). */
int id = -1;
if (skb_is_gso(skb)) {
struct iphdr *iph;
iph = ip_hdr(skb);
/* Remember the original IP ID so segments stay sequential. */
id = ntohs(iph->id);
skb = tnl_skb_gso_segment(skb, 0, false);
if (!skb || IS_ERR(skb))
return 0;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
int err;
err = skb_checksum_help(skb);
if (unlikely(err))
return 0;
}
/* Walk the segment list (a single skb in the non-GSO case). */
while (skb) {
struct sk_buff *next_skb = skb->next;
struct iphdr *iph;
int err;
skb->next = NULL;
iph = ip_hdr(skb);
if (id >= 0)
iph->id = htons(id++);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
/* Call the real kernel function, not this compat wrapper. */
#undef ip_local_out
err = ip_local_out(skb);
if (unlikely(net_xmit_eval(err)))
ret = err;
skb = next_skb;
}
return ret;
}
示例10: xfrm_output
/*
 * xfrm transmit entry point (no-offload variant): GSO packets go
 * through the GSO path; otherwise a deferred CHECKSUM_PARTIAL is
 * resolved in software before the transform, since encryption would
 * invalidate it.  Frees the skb and bumps LINUX_MIB_XFRMOUTERROR on
 * checksum failure.
 */
int xfrm_output(struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	int rc;

	if (skb_is_gso(skb))
		return xfrm_output_gso(skb);

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return xfrm_output2(skb);

	rc = skb_checksum_help(skb);
	if (rc) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
		kfree_skb(skb);
		return rc;
	}

	return xfrm_output2(skb);
}
示例11: kernel_dev_xmit
static netdev_tx_t
kernel_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
int err;
netif_stop_queue(dev);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (unlikely(err)) {
pr_err("checksum error (%d)\n", err);
return 0;
}
}
lib_dev_xmit((struct SimDevice *)dev, skb->data, skb->len);
dev_kfree_skb(skb);
netif_wake_queue(dev);
return 0;
}
示例12: skb_reset_inner_headers
/*
 * OVS-compat GRE offload preparation.  GSO packets get the GRE checksum
 * fixup hook installed for segmentation time; non-GSO packets have a
 * deferred checksum resolved now (when the GRE header carries one).
 * Returns the skb, or an ERR_PTR after freeing it on failure.
 */
struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
	int rc;

	skb_reset_inner_headers(skb);

	if (skb_is_gso(skb)) {
		/* The checksum is fixed up per segment during software GSO. */
		if (gre_csum)
			OVS_GSO_CB(skb)->fix_segment = gre_csum_fix;
		return skb;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (gre_csum) {
			rc = skb_checksum_help(skb);
			if (rc)
				goto fail;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;

fail:
	kfree_skb(skb);
	return ERR_PTR(rc);
}
示例13: rpl_ip_local_out
/*
 * OVS compat replacement for ip_local_out(): software-segments GSO
 * packets (re-numbering IP IDs consecutively across segments) and
 * resolves deferred checksums on non-GSO packets before output_ip().
 */
int rpl_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* No fixup registered: the stack can handle this packet as-is. */
	if (!OVS_GSO_CB(skb)->fix_segment)
		return output_ip(skb);

	/* This bit set can confuse some drivers on old kernel. */
	skb->encapsulation = 0;

	if (skb_is_gso(skb)) {
		int ident, rc = NET_XMIT_DROP;

		skb = tnl_skb_gso_segment(skb, 0, false, AF_INET);
		if (!skb || IS_ERR(skb))
			return NET_XMIT_DROP;

		/* Keep the segments' IP IDs sequential from the original. */
		ident = ntohs(ip_hdr(skb)->id);
		while (skb) {
			struct sk_buff *rest = skb->next;

			skb->next = NULL;
			ip_hdr(skb)->id = htons(ident++);
			rc = output_ip(skb);
			skb = rest;
		}
		return rc;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    unlikely(skb_checksum_help(skb)))
		return NET_XMIT_DROP;

	return output_ip(skb);
}
示例14: skb_unclone
/*
 * Prepare an skb for GRE tunnel transmit: tag GSO packets with
 * SKB_GSO_GRE, and for non-GSO packets resolve a deferred checksum when
 * the tunnel's outgoing flags request one.  Returns the skb, or an
 * ERR_PTR after freeing it on failure.
 */
static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
{
	int rc;

	if (skb_is_gso(skb)) {
		rc = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(rc))
			goto fail;
		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
		return skb;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Only needed when the tunnel emits a GRE checksum. */
		if (tunnel->parms.o_flags & TUNNEL_CSUM) {
			rc = skb_checksum_help(skb);
			if (unlikely(rc))
				goto fail;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;

fail:
	kfree_skb(skb);
	return ERR_PTR(rc);
}
示例15: netvsc_start_xmit
//.........这里部分代码省略.........
vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
VLAN_PRIO_SHIFT;
}
net_trans_info = get_net_transport_info(skb, &hdr_offset);
/*
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (net_trans_info & (INFO_IPV4 << 16)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (net_trans_info & INFO_TCP) {
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
if (net_trans_info & (INFO_IPV4 << 16))
csum_info->transmit.is_ipv4 = 1;
else
csum_info->transmit.is_ipv6 = 1;
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.tcp_header_offset = hdr_offset;
} else {
/* UDP checksum (and other) offload is not supported. */
if (skb_checksum_help(skb))
goto drop;
}
}
/* Start filling in the page buffers with the rndis hdr */
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
skb, packet, &pb);
/* timestamp packet in software */
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
if (likely(ret == 0)) {
struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->packets++;
tx_stats->bytes += skb_length;
u64_stats_update_end(&tx_stats->syncp);
return NETDEV_TX_OK;
}
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
return NETDEV_TX_BUSY;
}
if (ret == -ENOSPC)
++net_device_ctx->eth_stats.tx_no_space;
drop:
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
return NETDEV_TX_OK;
no_memory:
++net_device_ctx->eth_stats.tx_no_memory;
goto drop;
}