This article collects typical usage examples of the skb_cow function as it appears in C code (the Linux kernel). If you have been wondering what skb_cow is for, how to call it, or what real-world uses look like, the hand-picked snippets below should help.
Fifteen skb_cow code examples are shown below, sorted by popularity by default.
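Before looking at the individual examples, it helps to keep the common pattern in mind: skb_cow(skb, headroom) makes the skb's header data private (copying it if the buffer is cloned) and guarantees at least headroom bytes in front of skb->data, so the caller can then safely push or rewrite a header. The sketch below illustrates that pattern with a hypothetical helper, my_push_header, which is not part of any kernel API; it only shows the call order you will see repeated in the examples.
/* A minimal usage sketch (not from the kernel): my_push_header() is a
 * hypothetical helper that shows the usual call order around skb_cow().
 * Assumes <linux/skbuff.h> and <linux/string.h>.
 */
static int my_push_header(struct sk_buff *skb, const void *hdr, unsigned int hdr_len)
{
	int err;

	/* Make the skb header private (copy-on-write if it is cloned) and
	 * guarantee at least hdr_len bytes of headroom. Returns 0 on
	 * success or a negative errno on allocation failure.
	 */
	err = skb_cow(skb, hdr_len);
	if (err)
		return err;

	/* Only after a successful skb_cow() is it safe to grow the packet
	 * at the front and write the new header in place.
	 */
	skb_push(skb, hdr_len);
	memcpy(skb->data, hdr, hdr_len);
	return 0;
}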
Example 1: vlan_reorder_header
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
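/* skb_cow() with the current headroom does not grow the buffer; it only
 * un-shares/copies the header so the memmove below may edit it in place. */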
if (skb_cow(skb, skb_headroom(skb)) < 0)
return NULL;
memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
skb_reset_mac_len(skb);
return skb;
}
Example 2: mt7601u_skb_rooms
static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
{
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u32 need_head;
need_head = sizeof(struct mt76_txwi) + 4;
if (hdr_len % 4)
need_head += 2;
return skb_cow(skb, need_head);
}
Example 3: ip_rcv_options
static inline int ip_rcv_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
/* It looks as overkill, because not all
IP options require packet mangling.
But it is the easiest for now, especially taking
into account that combination of IP options
and running sniffer is extremely rare condition.
--ANK (980813)
*/
if (skb_cow(skb, skb_headroom(skb))) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
goto drop;
}
if (unlikely(opt->srr)) {
struct in_device *in_dev = in_dev_get(dev);
if (in_dev) {
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
if (IN_DEV_LOG_MARTIANS(in_dev) &&
net_ratelimit())
printk(KERN_INFO "source route option "
NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
NIPQUAD(iph->saddr),
NIPQUAD(iph->daddr));
in_dev_put(in_dev);
goto drop;
}
in_dev_put(in_dev);
}
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
drop:
return -1;
}
Example 4: nf_bridge_copy_header
/*
* When forwarding bridge frames, we save a copy of the original
* header before processing.
*/
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
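/* Guarantee header_size bytes of writable headroom so the saved Ethernet
 * (plus any encapsulation) header can be copied back in front of the data. */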
err = skb_cow(skb, header_size);
if (err)
return err;
skb_copy_to_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
__skb_push(skb, nf_bridge_encap_header_len(skb));
return 0;
}
Example 5: mapi_ip_rcv_finish
static inline struct sk_buff *mapi_ip_rcv_finish(struct sk_buff *skb,struct predef_func *pf)
{
struct iphdr *iph = skb->nh.iph;
struct cook_ip_struct *cis = (struct cook_ip_struct *)pf->data;
if(iph->ihl > 5)
{
/*
* It looks as overkill, because not all
* IP options require packet mangling.
* But it is the easiest for now, especially taking
* into account that combination of IP options
* and running sniffer is extremely rare condition.
* --ANK (980813)
*/
if(skb_cow(skb,skb_headroom(skb)))
{
goto drop;
}
iph = skb->nh.iph;
skb->ip_summed = 0;
if(ip_options_compile(NULL,skb))
{
goto inhdr_error;
}
}
return mapi_ip_local_deliver(skb,pf);
inhdr_error:
spin_lock(&pf->data_lock);
cis->ip_options_errors++;
spin_unlock(&pf->data_lock);
MAPI_DEBUG(if(net_ratelimit())
printk("COOK_IP : IP options error : %u.%u.%u.%u <- %u.%u.%u.%u\n",
NIPQUAD(skb->nh.iph->daddr),
NIPQUAD(skb->nh.iph->saddr)));
drop:
kfree_skb(skb);
return NULL;
}
Example 6: vlan_check_reorder_header
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
if (skb_cow(skb, skb_headroom(skb)) < 0)
skb = NULL;
if (skb) {
/* Lifted from Gleb's VLAN code... */
memmove(skb->data - ETH_HLEN,
skb->data - VLAN_ETH_HLEN, 12);
skb->mac_header += VLAN_HLEN;
}
}
return skb;
}
Example 7: x25_data_indication
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
skb_push(skb, 1);
if (skb_cow(skb, 1))
return NET_RX_DROP;
ptr = skb->data;
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
}
Example 8: nf_bridge_copy_header
/*
* When forwarding bridge frames, we save a copy of the original
* header before processing.
*/
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
int header_size = ETH_HLEN;
if (skb->protocol == htons(ETH_P_8021Q))
header_size += VLAN_HLEN;
err = skb_cow(skb, header_size);
if (err)
return err;
memcpy(skb->data - header_size, skb->nf_bridge->data, header_size);
if (skb->protocol == htons(ETH_P_8021Q))
__skb_push(skb, VLAN_HLEN);
return 0;
}
Example 9: mt76_insert_hdr_pad
int mt76_insert_hdr_pad(struct sk_buff *skb)
{
int len = ieee80211_get_hdrlen_from_skb(skb);
int ret;
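/* The payload that follows the 802.11 header must stay 4-byte aligned;
 * when the header length is not a multiple of 4, two pad bytes are
 * inserted right after it, so two extra bytes of headroom are needed. */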
if (len % 4 == 0)
return 0;
ret = skb_cow(skb, 2);
if (ret)
return ret;
skb_push(skb, 2);
memmove(skb->data, skb->data + 2, len);
skb->data[len] = 0;
skb->data[len + 1] = 0;
return 0;
}
Example 10: comxlapb_data_indication
static int comxlapb_data_indication(void *token, struct sk_buff *skb)
{
struct comx_channel *ch = token;
if (ch->dev->type == ARPHRD_X25) {
skb_push(skb, 1);
if (skb_cow(skb, 1))
return NET_RX_DROP;
skb->data[0] = 0; // indicate data for X25
skb->protocol = htons(ETH_P_X25);
} else {
skb->protocol = htons(ETH_P_IP);
}
skb->dev = ch->dev;
skb->mac.raw = skb->data;
return comx_rx(ch->dev, skb);
}
Example 11: ip6_forward
//......... part of the code omitted here .........
}
/* XXX: idev->cnf.proxy_ndp? */
if (net->ipv6.devconf_all->proxy_ndp &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0)
return ip6_input(skb);
else if (proxied < 0) {
IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
goto drop;
}
dst = skb_dst(skb);
/* IPv6 specs say nothing about it, but it is clear that we cannot
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
n = dst_get_neighbour(dst);
if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct rt6_info *rt;
/*
* incoming and outgoing devices are the same
* send a redirect.
*/
rt = (struct rt6_info *) dst;
if ((rt->rt6i_flags & RTF_GATEWAY))
target = (struct in6_addr*)&n->primary_key;
else
target = &hdr->daddr;
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
ndisc_send_redirect(skb, n, target);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
/* This check is security critical. */
if (addrtype == IPV6_ADDR_ANY ||
addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
ICMPV6_NOT_NEIGHBOUR, 0);
goto error;
}
}
mtu = dst_mtu(dst);
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (skb->len > mtu && !skb_is_gso(skb)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
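/* Make the skb writable and reserve room for the output device's
 * link-layer header before hop_limit is decremented below. */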
if (skb_cow(skb, dst->dev->hard_header_len)) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
goto drop;
}
hdr = ipv6_hdr(skb);
/* Mangling hops number delayed to point after skb COW */
hdr->hop_limit--;
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
ip6_forward_finish);
error:
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
kfree_skb(skb);
return -EINVAL;
}
Example 12: udp_uncompress
static int udp_uncompress(struct sk_buff *skb, size_t needed)
{
u8 tmp = 0, val = 0;
struct udphdr uh;
bool fail;
int err;
fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
break;
case LOWPAN_NHC_UDP_CS_P_01:
fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
break;
case LOWPAN_NHC_UDP_CS_P_10:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
break;
case LOWPAN_NHC_UDP_CS_P_11:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4));
uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));
break;
default:
BUG();
}
pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
ntohs(uh.source), ntohs(uh.dest));
/* checksum */
if (tmp & LOWPAN_NHC_UDP_CS_C) {
pr_debug_ratelimited("checksum elided currently not supported\n");
fail = true;
} else {
fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
}
if (fail)
return -EINVAL;
/* UDP length needs to be inferred from the lower layers
* here, we obtain the hint from the remaining size of the
* frame
*/
switch (lowpan_priv(skb->dev)->lltype) {
case LOWPAN_LLTYPE_IEEE802154:
if (lowpan_802154_cb(skb)->d_size)
uh.len = htons(lowpan_802154_cb(skb)->d_size -
sizeof(struct ipv6hdr));
else
uh.len = htons(skb->len + sizeof(struct udphdr));
break;
default:
uh.len = htons(skb->len + sizeof(struct udphdr));
break;
}
pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));
/* replace the compressed UDP head by the uncompressed UDP
* header
*/
err = skb_cow(skb, needed);
if (unlikely(err))
return err;
skb_push(skb, sizeof(struct udphdr));
skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
return 0;
}
Example 13: mpls_forward
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct net *net = dev_net(dev);
struct mpls_shim_hdr *hdr;
struct mpls_route *rt;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
struct mpls_dev *mdev;
unsigned int hh_len;
unsigned int new_header_size;
unsigned int mtu;
int err;
/* Careful this entire function runs inside of an rcu critical section */
mdev = mpls_dev_get(dev);
if (!mdev || !mdev->input_enabled)
goto drop;
if (skb->pkt_type != PACKET_HOST)
goto drop;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
goto drop;
if (!pskb_may_pull(skb, sizeof(*hdr)))
goto drop;
/* Read and decode the label */
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
/* Pop the label */
skb_pull(skb, sizeof(*hdr));
skb_reset_network_header(skb);
skb_orphan(skb);
rt = mpls_route_input_rcu(net, dec.label);
if (!rt)
goto drop;
/* Find the output device */
out_dev = rcu_dereference(rt->rt_dev);
if (!mpls_output_possible(out_dev))
goto drop;
if (skb_warn_if_lro(skb))
goto drop;
skb_forward_csum(skb);
/* Verify ttl is valid */
if (dec.ttl <= 1)
goto drop;
dec.ttl -= 1;
/* Verify the destination can hold the packet */
new_header_size = mpls_rt_header_size(rt);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
goto drop;
hh_len = LL_RESERVED_SPACE(out_dev);
if (!out_dev->header_ops)
hh_len = 0;
/* Ensure there is enough space for the headers in the skb */
if (skb_cow(skb, hh_len + new_header_size))
goto drop;
skb->dev = out_dev;
skb->protocol = htons(ETH_P_MPLS_UC);
if (unlikely(!new_header_size && dec.bos)) {
/* Penultimate hop popping */
if (!mpls_egress(rt, skb, dec))
goto drop;
} else {
bool bos;
int i;
skb_push(skb, new_header_size);
skb_reset_network_header(skb);
/* Push the new labels */
hdr = mpls_hdr(skb);
bos = dec.bos;
for (i = rt->rt_labels - 1; i >= 0; i--) {
hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
bos = false;
}
}
err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
return 0;
drop:
//......... part of the code omitted here .........
Example 14: ip_forward
int ip_forward(struct sk_buff *skb)
{
struct iphdr *iph; /* Our header */
struct rtable *rt; /* Route we use */
struct ip_options * opt = &(IPCB(skb)->opt);
if (skb_warn_if_lro(skb))
goto drop;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
goto drop;
if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
return NET_RX_SUCCESS;
if (skb->pkt_type != PACKET_HOST)
goto drop;
skb_forward_csum(skb);
/*
* According to the RFC, we must first decrease the TTL field. If
* that reaches zero, we must reply an ICMP control message telling
* that the packet's lifetime expired.
*/
if (ip_hdr(skb)->ttl <= 1)
goto too_many_hops;
if (!xfrm4_route_forward(skb))
goto drop;
rt = skb_rtable(skb);
if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto sr_failed;
if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
(ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(dst_mtu(&rt->u.dst)));
goto drop;
}
/* We are about to mangle packet. Copy it! */
if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
goto drop;
iph = ip_hdr(skb);
/* Decrease ttl after skb cow done */
ip_decrease_ttl(iph);
/*
* We now generate an ICMP HOST REDIRECT giving the route
* we calculated.
*/
if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
ip_rt_send_redirect(skb);
skb->priority = rt_tos2priority(iph->tos);
return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, rt->u.dst.dev,
ip_forward_finish);
sr_failed:
/*
* Strict routing permits no gatewaying
*/
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
goto drop;
too_many_hops:
/* Tell the sender its packet died... */
IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
Example 15: SetupNextSend
INT SetupNextSend(PMINI_ADAPTER Adapter, struct sk_buff *Packet, USHORT Vcid)
{
int status=0;
BOOLEAN bHeaderSupressionEnabled = FALSE;
B_UINT16 uiClassifierRuleID;
u16 QueueIndex = skb_get_queue_mapping(Packet);
LEADER Leader={0};
if(Packet->len > MAX_DEVICE_DESC_SIZE)
{
status = STATUS_FAILURE;
goto errExit;
}
uiClassifierRuleID = *((UINT32*) (Packet->cb)+SKB_CB_CLASSIFICATION_OFFSET);
bHeaderSupressionEnabled = Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled
& Adapter->bPHSEnabled;
if(Adapter->device_removed)
{
status = STATUS_FAILURE;
goto errExit;
}
status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID, bHeaderSupressionEnabled,
(UINT *)&Packet->len, Adapter->PackInfo[QueueIndex].bEthCSSupport);
if(status != STATUS_SUCCESS)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "PHS Transmit failed..\n");
goto errExit;
}
Leader.Vcid = Vcid;
if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET ))
Leader.Status = LEADER_STATUS_TCP_ACK;
else
Leader.Status = LEADER_STATUS;
if(Adapter->PackInfo[QueueIndex].bEthCSSupport)
{
Leader.PLength = Packet->len;
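/* If the headroom cannot hold the LEADER that is prepended below,
 * skb_cow() is used to expand it first. */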
if(skb_headroom(Packet) < LEADER_SIZE)
{
if((status = skb_cow(Packet,LEADER_SIZE)))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"bcm_transmit : Failed To Increase headRoom\n");
goto errExit;
}
}
skb_push(Packet, LEADER_SIZE);
memcpy(Packet->data, &Leader, LEADER_SIZE);
}
else
{
Leader.PLength = Packet->len - ETH_HLEN;
memcpy((LEADER*)skb_pull(Packet, (ETH_HLEN - LEADER_SIZE)), &Leader, LEADER_SIZE);
}
status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
Packet->data, (Leader.PLength + LEADER_SIZE));
if(status)
{
++Adapter->dev->stats.tx_errors;
if (netif_msg_tx_err(Adapter))
pr_info(PFX "%s: transmit error %d\n", Adapter->dev->name,
status);
}
else
{
struct net_device_stats *netstats = &Adapter->dev->stats;
Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
netstats->tx_bytes += Leader.PLength;
++netstats->tx_packets;
Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
Adapter->PackInfo[QueueIndex].uiSentPackets++;
Adapter->PackInfo[QueueIndex].NumOfPacketsSent++;
atomic_dec(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount);
Adapter->PackInfo[QueueIndex].uiThisPeriodSentBytes += Leader.PLength;
}
atomic_dec(&Adapter->CurrNumFreeTxDesc);
errExit:
dev_kfree_skb(Packet);
return status;
}