This article collects typical usage examples of the skb_reserve function (Linux kernel C) from real kernel sources. If you have been wondering what skb_reserve does, how to call it, or what real-world call sites look like, the examples below should help.
Fifteen code examples of skb_reserve follow, ordered roughly by popularity.
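Before the examples, a minimal sketch of the pattern most of these call sites follow may help: allocate an skb with extra headroom, call skb_reserve() to push skb->data (and skb->tail) past that headroom, then fill the payload with skb_put(). Only alloc_skb(), skb_reserve(), skb_put() and memset() are real kernel APIs here; the helper name and its parameters are made up for illustration.

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Hypothetical helper: leave `headroom` bytes free in front of a
 * `payload_len`-byte payload so that lower layers can later prepend
 * their headers without reallocating the buffer. */
static struct sk_buff *example_build_skb(unsigned int headroom,
					 unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(headroom + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* skb_reserve() moves skb->data and skb->tail forward; it must be
	 * called on an empty skb, before any data has been added. */
	skb_reserve(skb, headroom);

	/* skb_put() grows the data area by payload_len and returns a
	 * pointer to the start of the newly added region. */
	memset(skb_put(skb, payload_len), 0, payload_len);

	return skb;
}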
Example 1: raw_send_hdrinc
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
struct rtable *rt,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
int hh_len;
struct iphdr *iph;
struct sk_buff *skb;
int err;
if (length > rt->u.dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
rt->u.dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
skb = sock_alloc_send_skb(sk, length+hh_len+15,
flags&MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
skb_reserve(skb, hh_len);
skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
skb->ip_summed = CHECKSUM_NONE;
skb->h.raw = skb->nh.raw;
err = memcpy_fromiovecend((void *)iph, from, 0, length);
if (err)
goto error_fault;
/* We don't modify invalid header */
if (length >= sizeof(*iph) && iph->ihl * 4U <= length) {
if (!iph->saddr)
iph->saddr = rt->rt_src;
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
ip_select_ident(iph, &rt->u.dst, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
err = inet->recverr ? net_xmit_errno(err) : 0;
if (err)
goto error;
out:
return 0;
error_fault:
err = -EFAULT;
kfree_skb(skb);
error:
IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return err;
}
Example 2: ieee80211_if_parse_tkip_mic_test
static ssize_t ieee80211_if_parse_tkip_mic_test(
struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
u8 addr[ETH_ALEN];
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
__le16 fc;
if (!mac_pton(buf, addr))
return -EINVAL;
if (!ieee80211_sdata_running(sdata))
return -ENOTCONN;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
memset(hdr, 0, 24);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr->addr1, addr, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
break;
case NL80211_IFTYPE_STATION:
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
sdata_lock(sdata);
if (!sdata->u.mgd.associated) {
sdata_unlock(sdata);
dev_kfree_skb(skb);
return -ENOTCONN;
}
memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, addr, ETH_ALEN);
sdata_unlock(sdata);
break;
default:
dev_kfree_skb(skb);
return -EOPNOTSUPP;
}
hdr->frame_control = fc;
/*
* Add some length to the test frame to make it look a bit more valid.
* The exact contents do not matter, since the recipient is required
* to drop the frame because of the Michael MIC failure.
*/
memset(skb_put(skb, 50), 0, 50);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
ieee80211_tx_skb(sdata, skb);
return buflen;
}
Example 3: kim_int_recv
/**
* kim_int_recv - receive function called during firmware download
* firmware download responses on different UART drivers have been
* observed to arrive in bursts across multiple tty_receive calls,
* hence the buffering and state-machine logic below
*/
void kim_int_recv(struct kim_data_s *kim_gdata,
const unsigned char *data, long count)
{
const unsigned char *ptr;
int len = 0, type = 0;
unsigned char *plen;
pr_debug("%s", __func__);
/* Decode received bytes here */
ptr = data;
if (unlikely(ptr == NULL)) {
pr_err(" received null from TTY ");
return;
}
while (count) {
if (kim_gdata->rx_count) {
len = min_t(unsigned int, kim_gdata->rx_count, count);
memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
kim_gdata->rx_count -= len;
count -= len;
ptr += len;
if (kim_gdata->rx_count)
continue;
/* Check ST RX state machine, where are we? */
switch (kim_gdata->rx_state) {
/* Waiting for complete packet ? */
case ST_W4_DATA:
pr_debug("Complete pkt received");
validate_firmware_response(kim_gdata);
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
continue;
/* Waiting for Bluetooth event header ? */
case ST_W4_HEADER:
plen =
(unsigned char *)&kim_gdata->rx_skb->data[1];
pr_debug("event hdr: plen 0x%02x\n", *plen);
kim_check_data_len(kim_gdata, *plen);
continue;
} /* end of switch */
} /* end of if rx_state */
switch (*ptr) {
/* Bluetooth event packet? */
case 0x04:
kim_gdata->rx_state = ST_W4_HEADER;
kim_gdata->rx_count = 2;
type = *ptr;
break;
default:
pr_info("unknown packet");
ptr++;
count--;
continue;
}
ptr++;
count--;
kim_gdata->rx_skb =
alloc_skb(1024+8, GFP_ATOMIC);
if (!kim_gdata->rx_skb) {
pr_err("can't allocate mem for new packet");
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_count = 0;
return;
}
skb_reserve(kim_gdata->rx_skb, 8);
kim_gdata->rx_skb->cb[0] = 4;
kim_gdata->rx_skb->cb[1] = 0;
}
}
Example 4: dev_net (ndisc_build_skb)
struct sk_buff *ndisc_build_skb(struct net_device *dev,
const struct in6_addr *daddr,
const struct in6_addr *saddr,
struct icmp6hdr *icmp6h,
const struct in6_addr *target,
int llinfo)
{
struct net *net = dev_net(dev);
struct sock *sk = net->ipv6.ndisc_sk;
struct sk_buff *skb;
struct icmp6hdr *hdr;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
int len;
int err;
u8 *opt;
if (!dev->addr_len)
llinfo = 0;
len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
if (llinfo)
len += ndisc_opt_addr_space(dev);
skb = sock_alloc_send_skb(sk,
(MAX_HEADER + sizeof(struct ipv6hdr) +
len + hlen + tlen),
1, &err);
if (!skb) {
ND_PRINTK0(KERN_ERR
"ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
__func__, err);
return NULL;
}
skb_reserve(skb, hlen);
ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
skb->transport_header = skb->tail;
skb_put(skb, len);
hdr = (struct icmp6hdr *)skb_transport_header(skb);
memcpy(hdr, icmp6h, sizeof(*hdr));
opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
if (target) {
*(struct in6_addr *)opt = *target;
opt += sizeof(*target);
}
if (llinfo)
ndisc_fill_addr_option(opt, llinfo, dev->dev_addr,
dev->addr_len, dev->type);
hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len,
IPPROTO_ICMPV6,
csum_partial(hdr,
len, 0));
return skb;
}
Example 5: fill_packet
struct sk_buff *
fill_packet(struct net_device *odev, __u32 saddr)
{
struct sk_buff *skb;
__u8 *eth;
struct udphdr *udph;
int datalen, iplen;
struct iphdr *iph;
skb = alloc_skb(pkt_size+64+16, GFP_ATOMIC);
if (!skb) {
sprintf(pg_result, "No memory");
return NULL;
}
skb_reserve(skb, 16);
/* Reserve for ethernet and IP header */
eth = (__u8 *) skb_push(skb, 14);
iph = (struct iphdr*)skb_put(skb, sizeof( struct iphdr));
udph = (struct udphdr*)skb_put(skb, sizeof( struct udphdr));
/* Copy the ethernet header */
memcpy(eth, hh, 14);
datalen = pkt_size-14-20-8; /* Eth + IPh + UDPh */
if (datalen < 0)
datalen = 0;
udph->source= htons(9);
udph->dest= htons(9);
udph->len= htons(datalen+8); /* DATA + udphdr */
udph->check=0; /* No checksum */
iph->ihl=5;
iph->version=4;
iph->ttl=3;
iph->tos=0;
iph->protocol = IPPROTO_UDP; /* UDP */
iph->saddr = saddr;
iph->daddr = in_aton(pg_dst);
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
iph->check = 0;
iph->check = ip_fast_csum((void *)iph, iph->ihl);
skb->protocol = __constant_htons(ETH_P_IP);
skb->mac.raw = ((u8*)iph) - 14;
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
if (nfrags<=0) {
skb_put(skb, datalen);
} else {
int frags = nfrags;
int i;
if (frags > MAX_SKB_FRAGS)
frags = MAX_SKB_FRAGS;
if (datalen > frags*PAGE_SIZE) {
skb_put(skb, datalen-frags*PAGE_SIZE);
datalen = frags*PAGE_SIZE;
}
i = 0;
while (datalen > 0) {
struct page *page = alloc_pages(GFP_KERNEL, 0);
skb_shinfo(skb)->frags[i].page = page;
skb_shinfo(skb)->frags[i].page_offset = 0;
skb_shinfo(skb)->frags[i].size = (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
datalen -= skb_shinfo(skb)->frags[i].size;
skb->len += skb_shinfo(skb)->frags[i].size;
skb->data_len += skb_shinfo(skb)->frags[i].size;
i++;
skb_shinfo(skb)->nr_frags = i;
}
while (i < frags) {
int rem;
if (i == 0)
break;
rem = skb_shinfo(skb)->frags[i-1].size/2;
if (rem == 0)
break;
skb_shinfo(skb)->frags[i-1].size -= rem;
skb_shinfo(skb)->frags[i] = skb_shinfo(skb)->frags[i-1];
get_page(skb_shinfo(skb)->frags[i].page);
skb_shinfo(skb)->frags[i].page = skb_shinfo(skb)->frags[i-1].page;
skb_shinfo(skb)->frags[i].page_offset += skb_shinfo(skb)->frags[i-1].size;
skb_shinfo(skb)->frags[i].size = rem;
i++;
skb_shinfo(skb)->nr_frags = i;
}
}
return skb;
//......... part of the code is omitted here .........
Example 6: mesh_path_sel_frame_tx
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
const u8 *orig_addr, u32 orig_sn,
u8 target_flags, const u8 *target,
u32 target_sn, const u8 *da,
u8 hop_count, u8 ttl,
u32 lifetime, u32 metric, u32 preq_id,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u8 *pos, ie_len;
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
sizeof(mgmt->u.action.u.mesh_action);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + 37); /* max HWMP IE */
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
mgmt->u.action.u.mesh_action.action_code =
WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
switch (action) {
case MPATH_PREQ:
mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
break;
default:
kfree_skb(skb);
return -ENOTSUPP;
}
*pos++ = ie_len;
*pos++ = flags;
*pos++ = hop_count;
*pos++ = ttl;
if (action == MPATH_PREP) {
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
} else {
if (action == MPATH_PREQ) {
put_unaligned_le32(preq_id, pos);
pos += 4;
}
memcpy(pos, orig_addr, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(orig_sn, pos);
pos += 4;
}
put_unaligned_le32(lifetime, pos); /* interval for RANN */
pos += 4;
put_unaligned_le32(metric, pos);
pos += 4;
if (action == MPATH_PREQ) {
*pos++ = 1; /* destination count */
*pos++ = target_flags;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
} else if (action == MPATH_PREP) {
memcpy(pos, orig_addr, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(orig_sn, pos);
pos += 4;
}
ieee80211_tx_skb(sdata, skb);
return 0;
}
Example 7: mesh_path_error_tx
/**
* mesh_path_error_tx - Sends a PERR mesh management frame
*
* @ttl: allowed remaining hops
* @target: broken destination
* @target_sn: SN of the broken destination
* @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
* @sdata: local mesh subif
*
* Note: This function may be called with driver locks taken that the driver
* also acquires in the TX path. To avoid a deadlock we don't transmit the
* frame directly but add it to the pending queue instead.
*/
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
u8 ttl, const u8 *target, u32 target_sn,
u16 target_rcode, const u8 *ra)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mgmt *mgmt;
u8 *pos, ie_len;
int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
sizeof(mgmt->u.action.u.mesh_action);
if (time_before(jiffies, ifmsh->next_perr))
return -EAGAIN;
skb = dev_alloc_skb(local->tx_headroom +
sdata->encrypt_headroom +
IEEE80211_ENCRYPT_TAILROOM +
hdr_len +
2 + 15 /* PERR IE */);
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
mgmt->u.action.u.mesh_action.action_code =
WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
*pos++ = ie_len;
/* ttl */
*pos++ = ttl;
/* number of destinations */
*pos++ = 1;
/* Flags field has AE bit only as defined in
* sec 8.4.2.117 IEEE802.11-2012
*/
*pos = 0;
pos++;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
put_unaligned_le32(target_sn, pos);
pos += 4;
put_unaligned_le16(target_rcode, pos);
/* see note in function header */
prepare_frame_for_deferred_tx(sdata, skb);
ifmsh->next_perr = TU_TO_EXP_TIME(
ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
ieee80211_add_pending_skb(local, skb);
return 0;
}
Example 8: rx_submit
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
struct sk_buff *skb;
int retval = -ENOMEM;
size_t size = 0;
struct usb_ep *out;
unsigned long flags;
unsigned short reserve_headroom;
spin_lock_irqsave(&dev->lock, flags);
if (dev->port_usb)
out = dev->port_usb->out_ep;
else
out = NULL;
spin_unlock_irqrestore(&dev->lock, flags);
if (!out)
return -ENOTCONN;
size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
size += dev->port_usb->header_len;
size += out->maxpacket - 1;
size -= size % out->maxpacket;
if (dev->ul_max_pkts_per_xfer)
size *= dev->ul_max_pkts_per_xfer;
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
if (dev->rx_needed_headroom)
reserve_headroom = dev->rx_needed_headroom;
else
reserve_headroom = NET_IP_ALIGN;
pr_debug("%s: size: %zu + %d(hr)", __func__, size, reserve_headroom);
skb = alloc_skb(size + reserve_headroom, gfp_flags);
if (skb == NULL) {
DBG(dev, "no rx skb\n");
goto enomem;
}
skb_reserve(skb, reserve_headroom);
req->buf = skb->data;
req->length = size;
req->context = skb;
retval = usb_ep_queue(out, req, gfp_flags);
if (retval == -ENOMEM)
enomem:
defer_kevent(dev, WORK_RX_MEMORY);
if (retval) {
DBG(dev, "rx submit --> %d\n", retval);
if (skb)
dev_kfree_skb_any(skb);
}
return retval;
}
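The reservation above shows the other classic use of skb_reserve(): on the receive path it shifts the buffer start by NET_IP_ALIGN (usually 2) bytes, so that the IP header following a 14-byte Ethernet header ends up 4-byte aligned. Below is a self-contained sketch of just that part; the helper name and length parameter are hypothetical, while dev_alloc_skb(), NET_IP_ALIGN and skb_reserve() are real kernel APIs.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical Rx allocation: dev_alloc_skb() already adds its own
 * padding headroom; the explicit NET_IP_ALIGN reservation is what
 * aligns the IP header that follows the Ethernet header. */
static struct sk_buff *example_rx_alloc(unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(frame_len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}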
Example 9: rt2x00mac_tx_rts_cts
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue,
struct sk_buff *frag_skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
struct ieee80211_tx_info *rts_info;
struct sk_buff *skb;
unsigned int data_length;
int retval = 0;
if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
data_length = sizeof(struct ieee80211_cts);
else
data_length = sizeof(struct ieee80211_rts);
skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
if (unlikely(!skb)) {
rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
return -ENOMEM;
}
skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
skb_put(skb, data_length);
/*
* Copy TX information over from original frame to
* RTS/CTS frame. Note that we set the no encryption flag
* since we don't want this frame to be encrypted.
* RTS frames should be acked, while CTS-to-self frames
* should not. The ready for TX flag is cleared to prevent
* it from being automatically sent when the descriptor is
* written to the hardware.
*/
memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
rts_info = IEEE80211_SKB_CB(skb);
rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;
if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
else
rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
/* Disable hardware encryption */
rts_info->control.hw_key = NULL;
/*
* RTS/CTS frame should use the length of the frame plus any
* encryption overhead that will be added by the hardware.
*/
data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
frag_skb->data, data_length, tx_info,
(struct ieee80211_cts *)(skb->data));
else
ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
frag_skb->data, data_length, tx_info,
(struct ieee80211_rts *)(skb->data));
retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
if (retval) {
dev_kfree_skb_any(skb);
rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
}
return retval;
}
Example 10: tulip_rx
static int tulip_rx(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int entry = tp->cur_rx % RX_RING_SIZE;
int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
int received = 0;
if (tulip_debug > 4)
printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
if (tulip_debug > 5)
printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
dev->name, entry, status);
if (--rx_work_limit < 0)
break;
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
printk(KERN_WARNING "%s: Oversized Ethernet frame "
"spanned multiple buffers, status %8.8x!\n",
dev->name, status);
tp->stats.rx_length_errors++;
}
} else if (status & RxDescFatalErr) {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) tp->stats.rx_length_errors++;
if (status & 0x0004) tp->stats.rx_frame_errors++;
if (status & 0x0002) tp->stats.rx_crc_errors++;
if (status & 0x0001) tp->stats.rx_fifo_errors++;
}
} else {
/* Omit the four octet CRC from the length. */
short pkt_len = ((status >> 16) & 0x7ff) - 4;
struct sk_buff *skb;
#ifndef final_version
if (pkt_len > 1518) {
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
dev->name, pkt_len, pkt_len);
pkt_len = 1518;
tp->stats.rx_length_errors++;
}
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < tulip_rx_copybreak
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
pkt_len, 0);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
"do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
dev->name,
le32_to_cpu(tp->rx_ring[entry].buffer1),
(long long)tp->rx_buffers[entry].mapping,
skb->head, temp);
}
#endif
pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
//......... part of the code is omitted here .........
Example 11: tulip_poll
int tulip_poll(struct net_device *dev, int *budget)
{
struct tulip_private *tp = netdev_priv(dev);
int entry = tp->cur_rx % RX_RING_SIZE;
int rx_work_limit = *budget;
int received = 0;
if (!netif_running(dev))
goto done;
if (rx_work_limit > dev->quota)
rx_work_limit = dev->quota;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* that one buffer is needed for mit activation; or might be a
bug in the ring buffer code; check later -- JHS*/
if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif
if (tulip_debug > 4)
printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
break;
}
/* Acknowledge current RX interrupt sources. */
iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
break;
if (tulip_debug > 5)
printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
dev->name, entry, status);
if (--rx_work_limit < 0)
goto not_done;
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
printk(KERN_WARNING "%s: Oversized Ethernet frame "
"spanned multiple buffers, status %8.8x!\n",
dev->name, status);
tp->stats.rx_length_errors++;
}
} else if (status & RxDescFatalErr) {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) tp->stats.rx_length_errors++;
if (status & 0x0004) tp->stats.rx_frame_errors++;
if (status & 0x0002) tp->stats.rx_crc_errors++;
if (status & 0x0001) tp->stats.rx_fifo_errors++;
}
} else {
/* Omit the four octet CRC from the length. */
short pkt_len = ((status >> 16) & 0x7ff) - 4;
struct sk_buff *skb;
#ifndef final_version
if (pkt_len > 1518) {
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
dev->name, pkt_len, pkt_len);
pkt_len = 1518;
tp->stats.rx_length_errors++;
}
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < tulip_rx_copybreak
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
pkt_len, 0);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
pci_dma_sync_single_for_device(tp->pdev,
//......... part of the code is omitted here .........
Example 12: nf_send_reset6
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct tcphdr _otcph;
const struct tcphdr *otcph;
unsigned int otcplen, hh_len;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
struct ipv6hdr *ip6h;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
(!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
pr_debug("addr is not unicast.\n");
return;
}
otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
if (!otcph)
return;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
fl6.saddr = oip6h->daddr;
fl6.daddr = oip6h->saddr;
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst == NULL || dst->error) {
dst_release(dst);
return;
}
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
if (IS_ERR(dst))
return;
hh_len = (dst->dev->hard_header_len + 15)&~15;
nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+ sizeof(struct tcphdr) + dst->trailer_len,
GFP_ATOMIC);
if (!nskb) {
net_dbg_ratelimited("cannot alloc skb\n");
dst_release(dst);
return;
}
skb_dst_set(nskb, dst);
skb_reserve(nskb, hh_len + dst->header_len);
ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
ip6_dst_hoplimit(dst));
nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nf_ct_attach(nskb, oldskb);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* If we use ip6_local_out for bridged traffic, the MAC source on
* the RST will be ours, instead of the destination's. This confuses
* some routers/firewalls, and they drop the packet. So we need to
* build the eth header using the original destination's MAC as the
* source, and send the RST packet directly.
*/
if (oldskb->nf_bridge) {
struct ethhdr *oeth = eth_hdr(oldskb);
nskb->dev = nf_bridge_get_physindev(oldskb);
nskb->protocol = htons(ETH_P_IPV6);
ip6h->payload_len = htons(sizeof(struct tcphdr));
if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
oeth->h_source, oeth->h_dest, nskb->len) < 0)
return;
dev_queue_xmit(nskb);
} else
#endif
ip6_local_out(net, nskb->sk, nskb);
}
Example 13: nci_queue_tx_data_frags
static int nci_queue_tx_data_frags(struct nci_dev *ndev,
__u8 conn_id,
struct sk_buff *skb) {
struct nci_conn_info *conn_info;
int total_len = skb->len;
unsigned char *data = skb->data;
unsigned long flags;
struct sk_buff_head frags_q;
struct sk_buff *skb_frag;
int frag_len;
int rc = 0;
pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
if (!conn_info) {
rc = -EPROTO;
goto free_exit;
}
__skb_queue_head_init(&frags_q);
while (total_len) {
frag_len =
min_t(int, total_len, conn_info->max_pkt_payload_len);
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
GFP_KERNEL);
if (skb_frag == NULL) {
rc = -ENOMEM;
goto free_exit;
}
skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);
/* first, copy the data */
memcpy(skb_put(skb_frag, frag_len), data, frag_len);
/* second, set the header */
nci_push_data_hdr(ndev, conn_id, skb_frag,
((total_len == frag_len) ?
(NCI_PBF_LAST) : (NCI_PBF_CONT)));
__skb_queue_tail(&frags_q, skb_frag);
data += frag_len;
total_len -= frag_len;
pr_debug("frag_len %d, remaining total_len %d\n",
frag_len, total_len);
}
/* queue all fragments atomically */
spin_lock_irqsave(&ndev->tx_q.lock, flags);
while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
__skb_queue_tail(&ndev->tx_q, skb_frag);
spin_unlock_irqrestore(&ndev->tx_q.lock, flags);
/* free the original skb */
kfree_skb(skb);
goto exit;
free_exit:
while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
kfree_skb(skb_frag);
exit:
return rc;
}
Example 14: netdev_dbg (rtllib_ADDBA)
static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
struct ba_record *pBA,
u16 StatusCode, u8 type)
{
struct sk_buff *skb = NULL;
struct rtllib_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
u16 len = ieee->tx_headroom + 9;
netdev_dbg(ieee->dev, "%s(): frame(%d) sent to: %pM, ieee->dev:%p\n",
__func__, type, Dst, ieee->dev);
if (pBA == NULL) {
netdev_warn(ieee->dev, "pBA is NULL\n");
return NULL;
}
skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
if (skb == NULL)
return NULL;
memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));
skb_reserve(skb, ieee->tx_headroom);
BAReq = (struct rtllib_hdr_3addr *)skb_put(skb,
sizeof(struct rtllib_hdr_3addr));
ether_addr_copy(BAReq->addr1, Dst);
ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);
ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
tag = (u8 *)skb_put(skb, 9);
*tag++ = ACT_CAT_BA;
*tag++ = type;
*tag++ = pBA->DialogToken;
if (ACT_ADDBARSP == type) {
RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
put_unaligned_le16(StatusCode, tag);
tag += 2;
}
put_unaligned_le16(pBA->BaParamSet.shortData, tag);
tag += 2;
put_unaligned_le16(pBA->BaTimeoutValue, tag);
tag += 2;
if (ACT_ADDBAREQ == type) {
memcpy(tag, (u8 *)&(pBA->BaStartSeqCtrl), 2);
tag += 2;
}
#ifdef VERBOSE_DEBUG
print_hex_dump_bytes("rtllib_ADDBA(): ", DUMP_PREFIX_NONE, skb->data,
skb->len);
#endif
return skb;
}
Example 15: rtllib_xmit_inter
//......... part of the code is omitted here .........
nr_frags++;
else
bytes_last_frag = bytes_per_frag;
/* When we allocate the TXB we allocate enough space for the
* reserve and full fragment bytes (bytes_per_frag doesn't
* include prefix, postfix, header, FCS, etc.) */
txb = rtllib_alloc_txb(nr_frags, frag_size +
ieee->tx_headroom, GFP_ATOMIC);
if (unlikely(!txb)) {
printk(KERN_WARNING "%s: Could not allocate TXB\n",
ieee->dev->name);
goto failed;
}
txb->encrypted = encrypt;
txb->payload_size = bytes;
if (qos_actived)
txb->queue_index = UP2AC(skb->priority);
else
txb->queue_index = WME_AC_BE;
for (i = 0; i < nr_frags; i++) {
skb_frag = txb->fragments[i];
tcb_desc = (struct cb_desc *)(skb_frag->cb +
MAX_DEV_ADDR_SIZE);
if (qos_actived) {
skb_frag->priority = skb->priority;
tcb_desc->queue_index = UP2AC(skb->priority);
} else {
skb_frag->priority = WME_AC_BE;
tcb_desc->queue_index = WME_AC_BE;
}
skb_reserve(skb_frag, ieee->tx_headroom);
if (encrypt) {
if (ieee->hwsec_active)
tcb_desc->bHwSec = 1;
else
tcb_desc->bHwSec = 0;
skb_reserve(skb_frag,
crypt->ops->extra_mpdu_prefix_len +
crypt->ops->extra_msdu_prefix_len);
} else {
tcb_desc->bHwSec = 0;
}
frag_hdr = (struct rtllib_hdr_3addrqos *)
skb_put(skb_frag, hdr_len);
memcpy(frag_hdr, &header, hdr_len);
/* If this is not the last fragment, then add the
* MOREFRAGS bit to the frame control */
if (i != nr_frags - 1) {
frag_hdr->frame_ctl = cpu_to_le16(
fc | RTLLIB_FCTL_MOREFRAGS);
bytes = bytes_per_frag;
} else {
/* The last fragment has the remaining length */
bytes = bytes_last_frag;
}
if ((qos_actived) && (!bIsMulticast)) {
frag_hdr->seq_ctl =
rtllib_query_seqnum(ieee, skb_frag,
header.addr1);
frag_hdr->seq_ctl =