This article collects typical usage examples of the skb_clone function from the Linux kernel's networking code (C). If you have been struggling with questions like: what exactly does skb_clone do, how is it used, and what does real skb_clone code look like? Then congratulations — the hand-picked examples below may be just the help you need.
The following presents 15 code examples of the skb_clone function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better code examples.
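Before the examples, here is a minimal sketch of the basic pattern (a hypothetical helper, not taken from the examples below). skb_clone() duplicates only the sk_buff metadata; the packet data is shared and reference-counted, which makes it far cheaper than skb_copy() whenever neither consumer needs to modify the payload.

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hypothetical helper: hand the same payload to two consumers.
 * Each consumer takes ownership of the skb it receives and
 * eventually frees it with kfree_skb()/consume_skb().
 */
static int deliver_twice(struct sk_buff *skb,
                         void (*deliver)(struct sk_buff *))
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (!clone)
                return -ENOMEM;  /* caller still owns the original */

        deliver(clone);          /* consumer 1 gets the clone */
        deliver(skb);            /* consumer 2 gets the original */
        return 0;
}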
Example 1: ip_call_ra_chain
/*
* Process Router Attention IP option (RFC 2113)
*/
bool ip_call_ra_chain(struct sk_buff *skb)
{
struct ip_ra_chain *ra;
u8 protocol = ip_hdr(skb)->protocol;
struct sock *last = NULL;
struct net_device *dev = skb->dev;
for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
struct sock *sk = ra->sk;
/* If socket is bound to an interface, only report
* the packet if it came from that interface.
*/
if (sk && inet_sk(sk)->inet_num == protocol &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == dev->ifindex) &&
net_eq(sock_net(sk), dev_net(dev))) {
if (ip_is_fragment(ip_hdr(skb))) {
if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
return true;
}
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
raw_rcv(last, skb2);
}
last = sk;
}
}
if (last) {
raw_rcv(last, skb);
return true;
}
return false;
}
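The idiom to notice in Example 1: the loop clones the skb for every matching socket except the last one, which receives the original, so the common single-listener path needs no allocation at all. A condensed sketch of the fan-out pattern (the iterator name is made up for illustration):

/* Deliver one skb to N receivers using N-1 clones. */
struct sock *last = NULL;

for_each_matching_sock(sk) {            /* hypothetical iterator */
        if (last) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                if (skb2)
                        raw_rcv(last, skb2);  /* clone to the previous match */
        }
        last = sk;
}
if (last) {
        raw_rcv(last, skb);     /* last receiver consumes the original */
        return true;
}
return false;                   /* no receiver: caller keeps the skb */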
Example 2: dccp_send_close
/*
* Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
* cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
* any circumstances.
*/
void dccp_send_close(struct sock *sk, const int active)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
skb = alloc_skb(sk->sk_prot->max_header, prio);
if (skb == NULL)
return;
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header);
if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
else
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
if (active) {
dccp_write_xmit(sk, 1);
dccp_skb_entail(sk, skb);
dccp_transmit_skb(sk, skb_clone(skb, prio));
/*
* Retransmission timer for active-close: RFC 4340, 8.3 requires
* to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
* state can be left. The initial timeout is 2 RTTs.
* Since RTT measurement is done by the CCIDs, there is no easy
* way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
* is too low (200ms); we use a high value to avoid unnecessary
* retransmissions when the link RTT is > 0.2 seconds.
* FIXME: Let main module sample RTTs and use that instead.
*/
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
} else
dccp_transmit_skb(sk, skb);
}
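The key line in Example 2 is dccp_transmit_skb(sk, skb_clone(skb, prio)): the original skb is entailed on the write queue so the retransmission timer can resend it, while a clone is what actually hits the wire (dccp_transmit_skb checks for a NULL skb, so a failed clone simply means nothing is sent now and the timer retries later). A hedged sketch of the queue-then-transmit-a-clone idiom, with hypothetical helper names:

/* Keep the original queued for retransmission; send a clone. */
static void entail_and_xmit(struct sock *sk, struct sk_buff *skb, gfp_t prio)
{
        queue_for_retransmit(sk, skb);          /* hypothetical: parks skb */
        transmit(sk, skb_clone(skb, prio));     /* clone goes on the wire */
        arm_retransmit_timer(sk);               /* hypothetical */
}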
Example 3: DuplicatePacket
PNDIS_PACKET DuplicatePacket(
IN PRTMP_ADAPTER pAd,
IN PNDIS_PACKET pPacket,
IN UCHAR FromWhichBSSID)
{
struct sk_buff *skb;
PNDIS_PACKET pRetPacket = NULL;
USHORT DataSize;
UCHAR *pData;
DataSize = (USHORT) GET_OS_PKT_LEN(pPacket);
pData = (PUCHAR) GET_OS_PKT_DATAPTR(pPacket);
skb = skb_clone(RTPKT_TO_OSPKT(pPacket), MEM_ALLOC_FLAG);
if (skb)
{
skb->dev = get_netdev_from_bssid(pAd, FromWhichBSSID);
pRetPacket = OSPKT_TO_RTPKT(skb);
}
return pRetPacket;
}
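Example 3 "duplicates" a packet with skb_clone(), which is only safe while neither copy's payload is modified: a clone shares the data buffer with the original. When one side needs to write to the payload, skb_copy() (fully private copy) or pskb_copy() (private linear header, shared page frags) is the right tool. A small sketch of the distinction:

/* skb_clone(): new metadata, SHARED payload   (cheap)
 * skb_copy():  new metadata, PRIVATE payload  (expensive)
 */
static struct sk_buff *dup_packet(struct sk_buff *skb, bool will_write)
{
        if (will_write)
                return skb_copy(skb, GFP_ATOMIC);   /* safe to modify */
        return skb_clone(skb, GFP_ATOMIC);          /* read-only use */
}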
Example 4: ip6_output2
static int ip6_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct net_device *dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
&skb->nh.ipv6h->saddr)) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
/* Do not check for IFF_ALLMULTI; multicast routing
is not supported in any case.
*/
if (newskb)
NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
newskb->dev,
ip6_dev_loopback_xmit);
if (skb->nh.ipv6h->hop_limit == 0) {
IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
}
return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev, ip6_output_finish);
}
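Examples 4, 7 and 12 appear to be three generations of the same routine (ip6_output2 was later reworked into ip6_finish_output2), and skb_clone() plays the same role in all of them: when a locally generated multicast packet should also be heard by local listeners, a clone is looped back through the POST_ROUTING hook while the original continues toward the wire. The shape of the pattern, condensed (predicate and hooks are placeholders):

/* Multicast loopback: local listeners get a clone, the
 * original still goes out on the device.
 */
if (should_loop_back_locally(skb)) {            /* hypothetical predicate */
        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

        if (newskb)
                loopback_xmit(newskb);          /* hypothetical loopback path */
        /* on allocation failure the loopback copy is silently
         * dropped; the outbound transmission is unaffected */
}
transmit(skb);                                  /* hypothetical */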
Example 5: rose_rebuild_header
static int rose_rebuild_header(struct sk_buff *skb)
{
#ifdef CONFIG_INET
struct net_device *dev = skb->dev;
struct net_device_stats *stats = &dev->stats;
unsigned char *bp = (unsigned char *)skb->data;
struct sk_buff *skbn;
unsigned int len;
if (arp_find(bp + 7, skb)) {
return 1;
}
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
return 1;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
kfree_skb(skb);
len = skbn->len;
if (!rose_route_frame(skbn, NULL)) {
kfree_skb(skbn);
stats->tx_errors++;
return 1;
}
stats->tx_packets++;
stats->tx_bytes += len;
#endif
return 1;
}
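Example 5 clones the frame, transfers socket ownership to the clone with skb_set_owner_w() (so the sending socket's write-buffer accounting stays correct), frees the original, and then routes the clone. The effect is to swap a possibly shared skb for a private one before handing it to a consuming path. A hedged sketch of that swap:

/* Replace 'skb' with a private clone, preserving the sender's
 * sk_wmem accounting. Returns NULL on OOM (the original is
 * freed either way).
 */
static struct sk_buff *privatize_skb(struct sk_buff *skb)
{
        struct sk_buff *skbn = skb_clone(skb, GFP_ATOMIC);

        if (!skbn) {
                kfree_skb(skb);
                return NULL;
        }
        if (skb->sk)
                skb_set_owner_w(skbn, skb->sk); /* charge the sender */
        kfree_skb(skb);                         /* drop the original ref */
        return skbn;
}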
Example 6: nr_send_nak_frame
void nr_send_nak_frame(struct sock *sk)
{
struct sk_buff *skb, *skbn;
struct nr_sock *nr = nr_sk(sk);
if ((skb = skb_peek(&nr->ack_queue)) == NULL)
return;
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
return;
skbn->data[2] = nr->va;
skbn->data[3] = nr->vr;
if (nr->condition & NR_COND_OWN_RX_BUSY)
skbn->data[4] |= NR_CHOKE_FLAG;
nr_transmit_buffer(sk, skbn);
nr->condition &= ~NR_COND_ACK_PENDING;
nr->vl = nr->vr;
nr_stop_t1timer(sk);
}
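A subtlety in Example 6: skb_clone() shares the packet data, so the writes to skbn->data[2..4] are also visible through the original skb still sitting on the ack_queue. That is presumably acceptable here because those state bytes are rewritten on every (re)transmission, but code that must leave the queued original untouched should take a private copy instead. A hedged sketch (transmit hook and field meanings are hypothetical):

/* When the queued original must not be disturbed, copy
 * rather than clone before editing bytes in place.
 */
static void send_edited_copy(struct sock *sk, struct sk_buff *skb,
                             u8 va, u8 vr)
{
        struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC); /* private data */

        if (!skbn)
                return;
        skbn->data[2] = va;     /* edits stay local to the copy */
        skbn->data[3] = vr;
        transmit_buffer(sk, skbn);      /* hypothetical transmit hook */
}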
Example 7: ip6_finish_output2
static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
int res;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
((mroute6_socket(dev_net(dev), skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr))) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
/* Do not check for IFF_ALLMULTI; multicast routing
is not supported in any case.
*/
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
ip6_dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
IP6_INC_STATS(dev_net(dev), idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
skb->len);
}
rcu_read_lock();
if (dst->hh) {
res = neigh_hh_output(dst->hh, skb);
rcu_read_unlock();
return res;
} else {
neigh = dst_get_neighbour(dst);
if (neigh) {
res = neigh->output(skb);
rcu_read_unlock();
return res;
}
rcu_read_unlock();
}
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EINVAL;
}
Example 8: ax25_kick
void ax25_kick(ax25_cb *ax25)
{
struct sk_buff *skb, *skbn;
int last = 1;
unsigned short start, end, next;
if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
return;
if (ax25->condition & AX25_COND_PEER_RX_BUSY)
return;
if (skb_peek(&ax25->write_queue) == NULL)
return;
start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
end = (ax25->va + ax25->window) % ax25->modulus;
if (start == end)
return;
/*
* Transmit data until either we're out of data to send or
* the window is full. Send a poll on the final I frame if
* the window is filled.
*/
/*
* Dequeue the frame and copy it.
* Check for race with ax25_clear_queues().
*/
skb = skb_dequeue(&ax25->write_queue);
if (!skb)
return;
ax25->vs = start;
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&ax25->write_queue, skb);
break;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
next = (ax25->vs + 1) % ax25->modulus;
last = (next == end);
/*
* Transmit the frame copy.
* bke 960114: do not set the Poll bit on the last frame
* in DAMA mode.
*/
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
break;
#endif
}
ax25->vs = next;
/*
* Requeue the original data frame.
*/
skb_queue_tail(&ax25->ack_queue, skb);
} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
ax25->condition &= ~AX25_COND_ACK_PENDING;
if (!ax25_t1timer_running(ax25)) {
ax25_stop_t3timer(ax25);
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
}
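Example 8 is the classic ARQ sliding-window shape: dequeue from the write queue, transmit a clone, and park the original on the ack queue until it is acknowledged (a NAK handler such as Example 6 can later re-clone it for retransmission). Condensed into a hedged sketch:

/* Sliding-window transmit: clones go on the air, originals
 * wait on the ack queue for a possible retransmission.
 */
static void window_xmit(struct sk_buff_head *write_q,
                        struct sk_buff_head *ack_q,
                        bool (*window_open)(void),
                        void (*send_frame)(struct sk_buff *))
{
        struct sk_buff *skb;

        while (window_open() && (skb = skb_dequeue(write_q)) != NULL) {
                struct sk_buff *skbn = skb_clone(skb, GFP_ATOMIC);

                if (!skbn) {
                        skb_queue_head(write_q, skb);   /* retry later */
                        break;
                }
                send_frame(skbn);               /* clone hits the wire */
                skb_queue_tail(ack_q, skb);     /* kept until ACKed */
        }
}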
Example 9: rtw_recv_indicatepkt
int rtw_recv_indicatepkt(struct adapter *padapter,
struct recv_frame *precv_frame)
{
struct recv_priv *precvpriv;
struct __queue *pfree_recv_queue;
struct sk_buff *skb;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
precvpriv = &(padapter->recvpriv);
pfree_recv_queue = &(precvpriv->free_recv_queue);
skb = precv_frame->pkt;
if (!skb) {
RT_TRACE(_module_recv_osdep_c_, _drv_err_,
("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
goto _recv_indicatepkt_drop;
}
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("rtw_recv_indicatepkt():skb != NULL !!!\n"));
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("rtw_recv_indicatepkt():precv_frame->rx_head =%p precv_frame->hdr.rx_data =%p\n",
precv_frame->rx_head, precv_frame->rx_data));
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("precv_frame->hdr.rx_tail =%p precv_frame->rx_end =%p precv_frame->hdr.len =%d\n",
precv_frame->rx_tail, precv_frame->rx_end,
precv_frame->len));
skb->data = precv_frame->rx_data;
skb_set_tail_pointer(skb, precv_frame->len);
skb->len = precv_frame->len;
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
skb->head, skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len));
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sk_buff *pskb2 = NULL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
int bmcast = IS_MCAST(pattrib->dst);
if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
ETH_ALEN)) {
if (bmcast) {
psta = rtw_get_bcmc_stainfo(padapter);
pskb2 = skb_clone(skb, GFP_ATOMIC);
} else {
psta = rtw_get_stainfo(pstapriv, pattrib->dst);
}
if (psta) {
struct net_device *pnetdev;
pnetdev = (struct net_device *)padapter->pnetdev;
skb->dev = pnetdev;
skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
rtw_xmit_entry(skb, pnetdev);
if (bmcast)
skb = pskb2;
else
goto _recv_indicatepkt_end;
}
}
}
rcu_read_lock();
rcu_dereference(padapter->pnetdev->rx_handler_data);
rcu_read_unlock();
skb->ip_summed = CHECKSUM_NONE;
skb->dev = padapter->pnetdev;
skb->protocol = eth_type_trans(skb, padapter->pnetdev);
netif_rx(skb);
_recv_indicatepkt_end:
/* pointers to NULL before rtw_free_recvframe() */
precv_frame->pkt = NULL;
rtw_free_recvframe(precv_frame, pfree_recv_queue);
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));
return _SUCCESS;
_recv_indicatepkt_drop:
/* enqueue back to free_recv_queue */
rtw_free_recvframe(precv_frame, pfree_recv_queue);
//......... part of the code omitted here .........
Example 10: pep_connreq_rcv
static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sock *newsk;
struct pep_sock *newpn, *pn = pep_sk(sk);
struct pnpipehdr *hdr;
struct sockaddr_pn dst;
u16 peer_type;
u8 pipe_handle, enabled, n_sb;
u8 aligned = 0;
if (!pskb_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
hdr = pnp_hdr(skb);
pipe_handle = hdr->pipe_handle;
switch (hdr->state_after_connect) {
case PN_PIPE_DISABLE:
enabled = 0;
break;
case PN_PIPE_ENABLE:
enabled = 1;
break;
default:
pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
return -EINVAL;
}
peer_type = hdr->other_pep_type << 8;
if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
return -ENOBUFS;
}
/* Parse sub-blocks (options) */
n_sb = hdr->data[4];
while (n_sb > 0) {
u8 type, buf[1], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
if (data == NULL)
return -EINVAL;
switch (type) {
case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
if (len < 1)
return -EINVAL;
peer_type = (peer_type & 0xff00) | data[0];
break;
case PN_PIPE_SB_ALIGNED_DATA:
aligned = data[0] != 0;
break;
}
n_sb--;
}
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* Create a new to-be-accepted sock */
newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
if (!newsk) {
kfree_skb(skb);
return -ENOMEM;
}
sock_init_data(NULL, newsk);
newsk->sk_state = TCP_SYN_RECV;
newsk->sk_backlog_rcv = pipe_do_rcv;
newsk->sk_protocol = sk->sk_protocol;
newsk->sk_destruct = pipe_destruct;
newpn = pep_sk(newsk);
pn_skb_get_dst_sockaddr(skb, &dst);
newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
newpn->pn_sk.resource = pn->pn_sk.resource;
skb_queue_head_init(&newpn->ctrlreq_queue);
newpn->pipe_handle = pipe_handle;
atomic_set(&newpn->tx_credits, 0);
newpn->peer_type = peer_type;
newpn->rx_credits = 0;
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
newpn->aligned = aligned;
BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
skb_queue_head(&newsk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
sk_acceptq_added(sk);
sk_add_node(newsk, &pn->ackq);
return 0;
}
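In Example 10 the skb is cloned before being queued on the new socket's receive queue: the incoming skb still belongs to the caller's receive path, which frees it on return, so the to-be-accepted socket needs a reference of its own. skb_clone() supplies that reference without copying the payload. A minimal sketch of the hand-off:

/* Queue a received skb on a second socket; the caller's
 * path keeps (and later frees) the original.
 */
static int queue_on_newsk(struct sock *newsk, struct sk_buff *skb)
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (!clone)
                return -ENOMEM;
        skb_queue_head(&newsk->sk_receive_queue, clone);
        return 0;
}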
Example 11: dev_queue_xmit
//......... part of the code omitted here .........
* Make sure we haven't missed an interrupt.
*/
printk("dev_queue_xmit: worked around a missed interrupt\n");
start_bh_atomic();
dev->hard_start_xmit(NULL, dev);
end_bh_atomic();
return;
}
/*
* Negative priority is used to flag a frame that is being pulled from the
* queue front as a retransmit attempt. It therefore goes back on the queue
* start on a failure.
*/
if (pri < 0)
{
pri = -pri-1;
where = 1;
}
if (pri >= DEV_NUMBUFFS)
{
printk("bad priority in dev_queue_xmit.\n");
pri = 1;
}
/*
* If the address has not been resolved. Call the device header rebuilder.
* This can cover all protocols and technically not just ARP either.
*/
if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
return;
}
save_flags(flags);
cli();
if (!where) {
#ifdef CONFIG_SLAVE_BALANCING
skb->in_dev_queue=1;
#endif
skb_queue_tail(dev->buffs + pri, skb);
skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
skb = skb_dequeue(dev->buffs + pri);
skb_device_lock(skb); /* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
skb->in_dev_queue=0;
#endif
}
restore_flags(flags);
/* copy outgoing packets to any sniffer packet handlers */
if(!where)
{
for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
{
/* Never send packets back to the socket
* they originated from - MvS ([email protected])
*/
if (ptype->type == htons(ETH_P_ALL) &&
(ptype->dev == dev || !ptype->dev) &&
((struct sock *)ptype->data != skb->sk))
{
struct sk_buff *skb2;
if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
break;
/*
* The protocol knows this has (for other paths) been taken off
* and adds it back.
*/
skb2->len -= skb->dev->hard_header_len;
ptype->func(skb2, skb->dev, ptype);
nitcount--;
}
}
}
start_bh_atomic();
if (dev->hard_start_xmit(skb, dev) == 0) {
end_bh_atomic();
/*
* Packet is now solely the responsibility of the driver
*/
return;
}
end_bh_atomic();
/*
* Transmission failed, put skb back into a list. Once on the list it's safe and
* no longer device locked (it can be freed safely from the device queue)
*/
cli();
#ifdef CONFIG_SLAVE_BALANCING
skb->in_dev_queue=1;
dev->pkt_queue++;
#endif
skb_device_unlock(skb);
skb_queue_head(dev->buffs + pri, skb);
restore_flags(flags);
}
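Example 11 (an early Linux 1.x dev_queue_xmit) shows another classic consumer of skb_clone(): handing outgoing frames to ETH_P_ALL packet taps (sniffers). Each tap gets its own clone, so the skb bound for the driver is untouched; today's kernels keep the same idea in dev_queue_xmit_nit(). A loose, modern-style sketch (ptype_all is internal to net/core/dev.c, so treat this as illustrative only):

/* Give every registered tap its own clone of the frame. */
static void tap_deliver(struct sk_buff *skb)
{
        struct packet_type *ptype;

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                if (!skb2)
                        break;  /* out of memory: stop tapping */
                /* each tap consumes its own clone */
                ptype->func(skb2, skb->dev, ptype, skb->dev);
        }
        rcu_read_unlock();
}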
Example 12: ip6_finish_output2
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
struct in6_addr *nexthop;
int ret;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
((mroute6_socket(net, skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr))) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
/* Do not check for IFF_ALLMULTI; multicast routing
is not supported in any case.
*/
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
net, sk, newskb, NULL, newskb->dev,
dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
IP6_INC_STATS(net, idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
IPV6_ADDR_SCOPE_NODELOCAL &&
!(dev->flags & IFF_LOOPBACK)) {
kfree_skb(skb);
return 0;
}
}
rcu_read_lock_bh();
nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
ret = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
return ret;
}
rcu_read_unlock_bh();
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EINVAL;
}
Example 13: lowpan_frag_reasm
/* Check if this packet is complete.
* Returns NULL on failure by any reason, and pointer
* to current nexthdr field in reassembled frame.
*
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
*/
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
struct net_device *dev)
{
struct sk_buff *fp, *head = fq->q.fragments;
int sum_truesize;
inet_frag_kill(&fq->q, &lowpan_frags);
/* Make the one we just received the head. */
if (prev) {
head = prev->next;
fp = skb_clone(head, GFP_ATOMIC);
if (!fp)
goto out_oom;
fp->next = head->next;
if (!fp->next)
fq->q.fragments_tail = fp;
prev->next = fp;
skb_morph(head, fq->q.fragments);
head->next = fq->q.fragments->next;
consume_skb(fq->q.fragments);
fq->q.fragments = head;
}
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
goto out_oom;
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments.
*/
if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
goto out_oom;
clone->next = head->next;
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = head->data_len - plen;
clone->data_len = clone->len;
head->data_len -= clone->len;
head->len -= clone->len;
add_frag_mem_limit(&fq->q, clone->truesize);
}
WARN_ON(head == NULL);
sum_truesize = head->truesize;
for (fp = head->next; fp;) {
bool headstolen;
int delta;
struct sk_buff *next = fp->next;
sum_truesize += fp->truesize;
if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
kfree_skb_partial(fp, headstolen);
} else {
if (!skb_shinfo(head)->frag_list)
skb_shinfo(head)->frag_list = fp;
head->data_len += fp->len;
head->len += fp->len;
head->truesize += fp->truesize;
}
fp = next;
}
sub_frag_mem_limit(&fq->q, sum_truesize);
head->next = NULL;
head->dev = dev;
head->tstamp = fq->q.stamp;
fq->q.fragments = NULL;
fq->q.fragments_tail = NULL;
return 1;
out_oom:
net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
return -1;
}
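Examples 13, 14 and 15 share the fragment-reassembly idiom: to promote the most recently received fragment to the head of the chain without invalidating pointers held elsewhere, the code clones the element after prev so the clone keeps that slot in the list, then uses skb_morph() to hand the old head's identity to the new head. A condensed sketch of the swap (fragments_tail bookkeeping omitted):

/* Promote prev->next to the head of a ->next-chained
 * fragment list, mirroring the pattern in Examples 13-15.
 */
static int promote_to_head(struct sk_buff **fragments, struct sk_buff *prev)
{
        struct sk_buff *head = prev->next;      /* the newest fragment */
        struct sk_buff *fp = skb_clone(head, GFP_ATOMIC);

        if (!fp)
                return -ENOMEM;
        fp->next = head->next;                  /* clone keeps the old slot */
        prev->next = fp;
        skb_morph(head, *fragments);            /* take over old head's identity */
        head->next = (*fragments)->next;
        consume_skb(*fragments);                /* old head no longer needed */
        *fragments = head;
        return 0;
}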
Example 14: ip_frag_reasm
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
struct net_device *dev)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
struct sk_buff *fp, *head = qp->q.fragments;
int len;
int ihlen;
int err;
u8 ecn;
ipq_kill(qp);
ecn = ip_frag_ecn_table[qp->ecn];
if (unlikely(ecn == 0xff)) {
err = -EINVAL;
goto out_fail;
}
/* Make the one we just received the head. */
if (prev) {
head = prev->next;
fp = skb_clone(head, GFP_ATOMIC);
if (!fp)
goto out_nomem;
fp->next = head->next;
if (!fp->next)
qp->q.fragments_tail = fp;
prev->next = fp;
skb_morph(head, qp->q.fragments);
head->next = qp->q.fragments->next;
consume_skb(qp->q.fragments);
qp->q.fragments = head;
}
WARN_ON(!head);
WARN_ON(FRAG_CB(head)->offset != 0);
/* Allocate a new buffer for the datagram. */
ihlen = ip_hdrlen(head);
len = ihlen + qp->q.len;
err = -E2BIG;
if (len > 65535)
goto out_oversize;
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
goto out_nomem;
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
goto out_nomem;
clone->next = head->next;
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(qp->q.net, clone->truesize);
}
skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
for (fp = head->next; fp; fp = fp->next) {
head->data_len += fp->len;
head->len += fp->len;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
}
sub_frag_mem_limit(qp->q.net, head->truesize);
head->next = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
iph = ip_hdr(head);
iph->tot_len = htons(len);
iph->tos |= ecn;
/* When we set IP_DF on a refragmented skb we must also force a
//......... part of the code omitted here .........
Example 15: ip_frag_reasm
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
struct net_device *dev)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
struct sk_buff *fp, *head = qp->q.fragments;
int len;
int ihlen;
int err;
int sum_truesize;
u8 ecn;
ipq_kill(qp);
ecn = ip_frag_ecn_table[qp->ecn];
if (unlikely(ecn == 0xff)) {
err = -EINVAL;
goto out_fail;
}
/* Make the one we just received the head. */
if (prev) {
head = prev->next;
fp = skb_clone(head, GFP_ATOMIC);
if (!fp)
goto out_nomem;
fp->next = head->next;
if (!fp->next)
qp->q.fragments_tail = fp;
prev->next = fp;
skb_morph(head, qp->q.fragments);
head->next = qp->q.fragments->next;
consume_skb(qp->q.fragments);
qp->q.fragments = head;
}
WARN_ON(head == NULL);
WARN_ON(FRAG_CB(head)->offset != 0);
/* Allocate a new buffer for the datagram. */
ihlen = ip_hdrlen(head);
len = ihlen + qp->q.len;
err = -E2BIG;
if (len > 65535)
goto out_oversize;
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
goto out_nomem;
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
goto out_nomem;
clone->next = head->next;
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(&qp->q, clone->truesize);
}
skb_push(head, head->data - skb_network_header(head));
sum_truesize = head->truesize;
for (fp = head->next; fp;) {
bool headstolen;
int delta;
struct sk_buff *next = fp->next;
sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
kfree_skb_partial(fp, headstolen);
} else {
if (!skb_shinfo(head)->frag_list)
skb_shinfo(head)->frag_list = fp;
head->data_len += fp->len;
head->len += fp->len;
head->truesize += fp->truesize;
}
fp = next;
//......... part of the code omitted here .........