本文整理汇总了C++中consume_skb函数的典型用法代码示例。如果您正苦于以下问题:C++ consume_skb函数的具体用法?C++ consume_skb怎么用?C++ consume_skb使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了consume_skb函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: bpf_dp_channel_push_on_plum
/* userspace injects packet into plum
 *
 * Wraps @skb in a single stack-allocated plum_stack_frame and runs the
 * plum interpreter under rcu_read_lock(), dispatching on @direction:
 *  - OVS_BPF_OUT_DIR: forward the packet out of @port_id on @plum_id,
 *    then release our skb reference with consume_skb();
 *  - OVS_BPF_IN_DIR: queue the frame as if it was received on
 *    MUX(plum_id, port_id); execute_plums() takes over the skb;
 *  - OVS_BPF_FWD_TO_PLUM: hand the packet with args 1-4 to @fwd_plum_id,
 *    then consume_skb().
 * NOTE(review): an unrecognized @direction falls through without running
 * or freeing the skb - presumably the caller handles that case; confirm.
 * Always returns 0.
 */
int bpf_dp_channel_push_on_plum(struct datapath *dp, u32 plum_id, u32 port_id,
u32 fwd_plum_id, u32 arg1, u32 arg2, u32 arg3,
u32 arg4, struct sk_buff *skb, u32 direction)
{
struct plum_stack stack = {};
struct plum_stack_frame first_frame = {};
struct plum_stack_frame *frame;
struct bpf_dp_context *ctx;
u32 dest;
/* first frame lives on our stack; kmem = 0 presumably marks it as
 * not kmalloc()ed so execute_plums() won't try to free it - confirm
 */
frame = &first_frame;
frame->kmem = 0;
INIT_LIST_HEAD(&stack.list);
ctx = &frame->ctx;
ctx->stack = &stack;
ctx->skb = skb;
ctx->dp = dp;
bpf_dp_ctx_init(ctx);
rcu_read_lock();
if (direction == OVS_BPF_OUT_DIR) {
ctx->context.plum_id = plum_id;
stack.curr_frame = frame;
bpf_forward(&ctx->context, port_id);
execute_plums(&stack);
/* our reference is done with; this is not an error drop */
consume_skb(skb);
} else if (direction == OVS_BPF_IN_DIR) {
/* deliver as if the packet arrived on this plum/port pair */
dest = MUX(plum_id, port_id);
frame->dest = dest;
stack.curr_frame = NULL;
list_add(&frame->link, &stack.list);
execute_plums(&stack);
} else if (direction == OVS_BPF_FWD_TO_PLUM) {
ctx->context.plum_id = plum_id;
ctx->context.arg1 = arg1;
ctx->context.arg2 = arg2;
ctx->context.arg3 = arg3;
ctx->context.arg4 = arg4;
stack.curr_frame = frame;
bpf_forward_to_plum(&ctx->context, fwd_plum_id);
execute_plums(&stack);
consume_skb(skb);
}
rcu_read_unlock();
return 0;
}
示例2: synproxy_tg6
/*
 * synproxy_tg6 - IPv6 SYNPROXY target: complete TCP handshakes on behalf
 * of the protected server.
 *
 * For an initial SYN the target answers with a cookie SYN/ACK itself and
 * steals the skb (NF_STOLEN via consume_skb, not a drop). For the
 * client's final ACK it validates the cookie via
 * synproxy_recv_client_ack() and steals the skb on success. All other
 * TCP packets pass through unchanged (XT_CONTINUE); malformed packets
 * are dropped.
 */
static unsigned int
synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_synproxy_info *info = par->targinfo;
struct net *net = xt_net(par);
struct synproxy_net *snet = synproxy_pernet(net);
struct synproxy_options opts = {};
struct tcphdr *th, _th;
/* verify the TCP checksum before trusting any header fields */
if (nf_ip6_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP))
return NF_DROP;
th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
if (th == NULL)
return NF_DROP;
if (!synproxy_parse_options(skb, par->thoff, th, &opts))
return NF_DROP;
if (th->syn && !(th->ack || th->fin || th->rst)) {
/* Initial SYN from client */
this_cpu_inc(snet->stats->syn_received);
/* ECN setup is signalled by ECE+CWR both set in the SYN */
if (th->ece && th->cwr)
opts.options |= XT_SYNPROXY_OPT_ECN;
/* keep only the options the rule permits us to announce */
opts.options &= info->options;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_init_timestamp_cookie(info, &opts);
else
/* these options are only usable together with timestamps here */
opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
synproxy_send_client_synack(net, skb, th, &opts);
/* we answered the SYN ourselves: skb is consumed, not dropped */
consume_skb(skb);
return NF_STOLEN;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
/* ACK from client */
if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) {
consume_skb(skb);
return NF_STOLEN;
} else {
return NF_DROP;
}
}
return XT_CONTINUE;
}
示例3: rw_fpath_kni_handle_frame
/**
 * rx_handler invoked just before the final delivery to ETH_P_ALL
 * listeners. It restores the pkt_type that the transmit path stashed in
 * the skb (via skb->mark) and decides whether the frame is passed on or
 * consumed here.
 *
 * @param[in] pskb - double pointer to the skb in case we need to clone..
 *
 * @returns the rx_handler action to apply to the skb (pass or consumed).
 */
rx_handler_result_t rw_fpath_kni_handle_frame(struct sk_buff **pskb)
{
    struct sk_buff *skb = (struct sk_buff *)*pskb;
    struct kni_dev *priv;
    rx_handler_result_t action = RX_HANDLER_PASS;

    /* make sure we own the skb before mutating it */
    skb = skb_share_check(skb, GFP_ATOMIC);
    if (unlikely(!skb))
        return RX_HANDLER_CONSUMED;

    if (!skb->dev) {
        KNI_ERR("No device in the skb in rx_handler\n");
        return RX_HANDLER_PASS;
    }

    priv = netdev_priv(skb->dev);
    if (!priv) {
        KNI_ERR("no kni private data in the device in rx_handler\n");
        return RX_HANDLER_PASS;
    }

    *pskb = skb;

    switch (skb->pkt_type) {
    case PACKET_OUTGOING:
        /* locally generated frame; must not be delivered again */
        skb->pkt_type = PACKET_OTHERHOST;
        priv->rx_treat_as_tx_filtered++;
        consume_skb(skb);
        action = RX_HANDLER_CONSUMED;
        break;
    case PACKET_LOOPBACK:
        /* original pkt_type was stashed in skb->mark by the tx side */
        skb->pkt_type = skb->mark;
        if (skb->pkt_type == PACKET_OTHERHOST) {
            /*Force the packet to be accepted by the IP stack*/
            skb->pkt_type = 0;
        }
        priv->rx_treat_as_tx_delivered++;
        skb->mark = 0;
        break;
    case PACKET_OTHERHOST:
        /* not addressed to us: count and swallow */
        priv->rx_filtered++;
        consume_skb(skb);
        action = RX_HANDLER_CONSUMED;
        break;
    default:
        priv->rx_delivered++;
        break;
    }

    return action;
}
示例4: arp_rcv
/*
 * arp_rcv - ARP receive handler.
 *
 * Filters out ARPs this host deliberately ignores, validates the header,
 * and hands the packet to arp_process() through the NF_ARP_IN netfilter
 * hook. Deliberately ignored packets go through consume_skb() while
 * malformed ones go through kfree_skb(), so dropwatch only accounts real
 * drops. Always returns 0.
 */
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
const struct arphdr *arp;
/* do not tweak dropwatch on an ARP we will ignore */
if (dev->flags & IFF_NOARP ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK)
goto consumeskb;
/* get a private copy before modifying a possibly-shared skb */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out_of_mem;
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto freeskb;
arp = arp_hdr(skb);
/* only this device's hardware-address length and 4-byte (IPv4)
 * protocol addresses are handled here
 */
if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
goto freeskb;
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
consumeskb:
consume_skb(skb);
return 0;
freeskb:
kfree_skb(skb);
out_of_mem:
return 0;
}
示例5: bpf_dp_process_received_packet
/* packet arriving on vport processed here
* must be called with rcu_read_lock
*/
void bpf_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct plum *plum;
u32 dest;
struct plum_stack stack = {};
struct plum_stack_frame first_frame = {};
struct plum_stack_frame *frame;
struct bpf_dp_context *ctx;
plum = rcu_dereference(dp->plums[0]);
dest = atomic_read(&plum->ports[p->port_no]);
if (dest) {
frame = &first_frame;
INIT_LIST_HEAD(&stack.list);
ctx = &frame->ctx;
ctx->stack = &stack;
ctx->context.port_id = p->port_no;
ctx->skb = skb;
ctx->dp = dp;
bpf_dp_ctx_init(ctx);
plum_update_stats(plum, p->port_no, skb, true);
frame->dest = dest;
stack.curr_frame = NULL;
list_add(&frame->link, &stack.list);
execute_plums(&stack);
} else {
consume_skb(skb);
}
}
示例6: skb_clone
/*
 * run_bpf - let a BPF program pick a socket out of a reuseport group.
 *
 * The program must see the payload, so the protocol header is pulled off
 * before the run and pushed back afterwards; a shared skb is cloned first
 * so the pull does not disturb other users. Returns the selected socket,
 * or NULL if cloning/pulling fails or the program's index is out of range.
 */
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
    struct sk_buff *clone = NULL;
    u32 chosen;

    if (skb_shared(skb)) {
        clone = skb_clone(skb, GFP_ATOMIC);
        if (!clone)
            return NULL;
        skb = clone;
    }

    /* temporarily advance data past protocol header */
    if (!pskb_pull(skb, hdr_len)) {
        kfree_skb(clone);
        return NULL;
    }

    chosen = bpf_prog_run_save_cb(prog, skb);

    /* restore the header and release our clone, if any */
    __skb_push(skb, hdr_len);
    consume_skb(clone);

    return chosen < socks ? reuse->socks[chosen] : NULL;
}
示例7: skb_clone
/*
 * skb_set_peeked - mark an skb as peeked, unsharing it first if needed.
 *
 * A shared skb must not be modified, so it is replaced in its queue by a
 * private clone before the peeked flag is set; the original reference is
 * then dropped with consume_skb(). Returns the (possibly new) skb, or
 * ERR_PTR(-ENOMEM) if the clone could not be allocated.
 */
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
    struct sk_buff *clone;

    if (skb->peeked)
        return skb;

    /* We have to unshare an skb before modifying it. */
    if (skb_shared(skb)) {
        clone = skb_clone(skb, GFP_ATOMIC);
        if (!clone)
            return ERR_PTR(-ENOMEM);

        /* splice the clone into the queue in place of skb */
        skb->prev->next = clone;
        skb->next->prev = clone;
        clone->prev = skb->prev;
        clone->next = skb->next;

        consume_skb(skb);
        skb = clone;
    }

    skb->peeked = 1;
    return skb;
}
示例8: printk
/*
 * ax25_rt_build_path - prepend the AX.25 digipeater path to @skb.
 *
 * Needs digi->ndigi * AX25_ADDR_LEN bytes of headroom; if the skb does
 * not have that much, it is reallocated (preserving socket ownership via
 * skb_set_owner_w) and the old skb reference is released with
 * consume_skb(). Returns the (possibly new) skb, or NULL on allocation
 * failure. NOTE(review): on failure the original skb is not freed here -
 * presumably the caller owns it in that case; confirm.
 */
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
	ax25_address *dest, ax25_digi *digi)
{
	struct sk_buff *skbn;
	unsigned char *bp;
	int len;

	len = digi->ndigi * AX25_ADDR_LEN;

	if (skb_headroom(skb) < len) {
		skbn = skb_realloc_headroom(skb, len);
		if (skbn == NULL) {
			/* fixed: message used to name ax25_dg_build_path */
			printk(KERN_CRIT "AX.25: ax25_rt_build_path - out of memory\n");
			return NULL;
		}
		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);
		consume_skb(skb);
		skb = skbn;
	}

	bp = skb_push(skb, len);
	ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS);
	return skb;
}
示例9: tbf_segment
/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 *
 * Splits @skb with skb_gso_segment() and enqueues each segment on the
 * child qdisc individually. Segments rejected by the child only count as
 * drops when net_xmit_drop_count() says so. The tree's queue-length
 * accounting is corrected for turning one skb into @nb segments, and the
 * original skb is released with consume_skb() since it was not dropped.
 * Returns NET_XMIT_SUCCESS if at least one segment was queued.
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
return qdisc_reshape_fail(skb, sch);
nb = 0;
while (segs) {
/* detach the segment from the list before enqueueing it */
nskb = segs->next;
segs->next = NULL;
qdisc_skb_cb(segs)->pkt_len = segs->len;
ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
} else {
nb++;
}
segs = nskb;
}
sch->q.qlen += nb;
/* one skb became nb segments: fix up ancestors' qlen accounting */
if (nb > 1)
qdisc_tree_decrease_qlen(sch, 1 - nb);
consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
示例10: ip_forward_finish_gso
/* called if GSO skb needs to be fragmented on forward
 *
 * Software-segments @skb against the output device's features and sends
 * each resulting segment through dst_output(). The original skb is
 * released with consume_skb() once segmentation succeeded; the first
 * error from dst_output() (if any) is returned, otherwise 0.
 */
static int ip_forward_finish_gso(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	features = netif_skb_dev_features(skb, dst->dev);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	/* skb_gso_segment() may return an ERR_PTR or NULL; treat both as
	 * failure so the do/while below never dereferences a NULL list
	 * head (the original check only covered IS_ERR).
	 */
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* the original skb has been replaced by the segment list */
	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = dst_output(segs);

		/* remember the first failure but keep sending */
		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}
示例11: tbf_segment
/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 *
 * Splits @skb with skb_gso_segment() and enqueues the segments on the
 * child qdisc one by one. Backlog accounting is corrected for replacing
 * one packet of prev_len bytes with @nb segments totalling @len bytes,
 * and the original skb is released with consume_skb() since it was not
 * dropped. Returns NET_XMIT_SUCCESS if at least one segment was queued.
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
return qdisc_drop(skb, sch, to_free);
nb = 0;
while (segs) {
nskb = segs->next;
/* unlink the segment before handing it to the child qdisc */
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
} else {
nb++;
}
segs = nskb;
}
sch->q.qlen += nb;
/* one skb became nb segments: fix ancestor qlen/backlog accounting */
if (nb > 1)
qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
示例12: ax25_transmit_buffer
/*
 * ax25_transmit_buffer - prepend the AX.25 address field to @skb and
 * queue it on the connection's device.
 *
 * If the device has gone away the connection is torn down instead. When
 * the skb lacks headroom for the address field it is reallocated
 * (preserving socket ownership) and the old reference is released with
 * consume_skb(); on allocation failure the skb is freed and the packet
 * is silently lost.
 */
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
    struct sk_buff *resized;
    unsigned char *addr;
    int need;

    if (ax25->ax25_dev == NULL) {
        /* device vanished: drop the whole connection */
        ax25_disconnect(ax25, ENETUNREACH);
        return;
    }

    need = ax25_addr_size(ax25->digipeat);

    if (skb_headroom(skb) < need) {
        resized = skb_realloc_headroom(skb, need);
        if (resized == NULL) {
            printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
            kfree_skb(skb);
            return;
        }

        if (skb->sk != NULL)
            skb_set_owner_w(resized, skb->sk);

        consume_skb(skb);
        skb = resized;
    }

    addr = skb_push(skb, need);
    ax25_addr_build(addr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

    ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
示例13: handle_fragments
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 *
 * Runs conntrack defragmentation for @skb in conntrack zone @zone. IPv4
 * uses ip_defrag(); IPv6 uses nf_ct_frag6_gather() and, once a datagram
 * is complete, morphs @skb into the reassembled packet. The OVS GSO
 * control block is saved first (defrag code overwrites skb->cb) and
 * restored before returning, carrying the recorded max fragment size.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
u16 zone, struct sk_buff *skb)
{
/* defrag clobbers skb->cb, so preserve the OVS GSO state across it */
struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);
if (!skb->dev) {
OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
return -EINVAL;
}
if (key->eth.type == htons(ETH_P_IP)) {
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
int err;
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
/* nonzero means skb was stolen/freed; 0 means fully reassembled */
err = ip_defrag(skb, user);
if (err)
return err;
ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
} else if (key->eth.type == htons(ETH_P_IPV6)) {
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
struct sk_buff *reasm;
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
/* NULL: more fragments are needed, this one was queued (stolen) */
reasm = nf_ct_frag6_gather(skb, user);
if (!reasm)
return -EINPROGRESS;
/* NOTE(review): reasm == skb is treated as an error - confirm
 * against this kernel's nf_ct_frag6_gather() contract
 */
if (skb == reasm) {
kfree_skb(skb);
return -EINVAL;
}
/* Don't free 'skb' even though it is one of the original
 * fragments, as we're going to morph it into the head.
 */
skb_get(skb);
nf_ct_frag6_consume_orig(reasm);
key->ip.proto = ipv6_hdr(reasm)->nexthdr;
/* turn @skb into a copy of the reassembled head, then drop reasm */
skb_morph(skb, reasm);
skb->next = reasm->next;
consume_skb(reasm);
ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
} else {
kfree_skb(skb);
return -EPFNOSUPPORT;
}
key->ip.frag = OVS_FRAG_TYPE_NONE;
skb_clear_hash(skb);
skb->ignore_df = 1;
/* restore the OVS control block that defrag overwrote */
*OVS_GSO_CB(skb) = ovs_cb;
return 0;
}
示例14: baseband_usb_netdev_start_xmit
static netdev_tx_t baseband_usb_netdev_start_xmit(
struct sk_buff *skb, struct net_device *dev)
{
int i = 0;
struct baseband_usb *usb = netdev_priv(dev);/*wjp for pm*/
struct urb *urb;
unsigned char *buf;
int err;
pr_debug("baseband_usb_netdev_start_xmit\n");
/* check input */
if (!skb) {
pr_err("no skb\n");
return -EINVAL;
}
/* allocate urb */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
pr_err("usb_alloc_urb() failed\n");
return -ENOMEM;
}
buf = kzalloc(skb->len - 14, GFP_ATOMIC);
if (!buf) {
pr_err("usb buffer kzalloc() failed\n");
usb_free_urb(urb);
return -ENOMEM;
}
err = skb_copy_bits(skb, 14, buf, skb->len - 14);
if (err < 0) {
pr_err("skb_copy_bits() failed - %d\n", err);
kfree(buf);
usb_free_urb(urb);
return err;
}
usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.out,
buf, skb->len - 14,
usb_net_raw_ip_tx_urb_comp,
usb);
urb->transfer_flags = URB_ZERO_PACKET;
/* submit tx urb */
usb->usb.tx_urb = urb;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
pr_err("usb_submit_urb() failed - err %d\n", err);
usb->usb.tx_urb = (struct urb *) 0;
kfree(urb->transfer_buffer);
usb_free_urb(urb);
kfree_skb(skb);
return err;
}
/* free skb */
consume_skb(skb);
return NETDEV_TX_OK;
}
示例15: vcan_tx
/*
 * vcan_tx - transmit on the virtual CAN interface.
 *
 * There is no wire: transmitted frames are only counted, and frames
 * flagged PACKET_LOOPBACK are echoed back to the stack. With the module
 * parameter 'echo' disabled, the CAN core has already performed the
 * loopback, so this function only updates the rx counters.
 */
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
    struct net_device_stats *stats = &dev->stats;
    int loopback;

    if (can_dropped_invalid_skb(dev, skb))
        return NETDEV_TX_OK;

    stats->tx_packets++;
    stats->tx_bytes += cfd->len;

    /* does this frame have to be looped back? */
    loopback = skb->pkt_type == PACKET_LOOPBACK;

    if (!echo) {
        /* no echo handling available inside this driver */
        if (loopback) {
            /* only count here; the CAN core already did the echo */
            stats->rx_packets++;
            stats->rx_bytes += cfd->len;
        }
        consume_skb(skb);
        return NETDEV_TX_OK;
    }

    if (!loopback) {
        /* no looped packets => no counting */
        consume_skb(skb);
        return NETDEV_TX_OK;
    }

    /* perform standard echo handling for CAN network interfaces */
    skb = can_create_echo_skb(skb);
    if (skb) {
        /* receive with packet counting */
        vcan_rx(skb, dev);
    }

    return NETDEV_TX_OK;
}