This article collects typical usage examples of the C function skb_frag_size. If you are unsure what skb_frag_size does, how to call it, or want to see it used in real code, the hand-picked examples below should help.
The following presents 15 code examples of skb_frag_size, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better code examples.
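Before the driver examples, here is a minimal sketch of the most common pattern: walk an SKB's page fragments, read each fragment's length with skb_frag_size(), and DMA-map it for transmit. The driver context is hypothetical (my_dev and my_post_tx_buffer are placeholders, not part of any example below); the kernel APIs used (skb_shinfo, skb_frag_dma_map, dma_mapping_error) are the same ones the examples rely on.

/* Minimal sketch (hypothetical driver context): map every non-empty
 * fragment of an skb for DMA and hand it to the hardware. */
static int my_map_skb_frags(struct device *my_dev, struct sk_buff *skb)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    int i;

    for (i = 0; i < shinfo->nr_frags; i++) {
        const skb_frag_t *frag = &shinfo->frags[i];
        unsigned int len = skb_frag_size(frag);  /* fragment length in bytes */
        dma_addr_t addr;

        if (!len)       /* zero-length fragments carry no data */
            continue;

        addr = skb_frag_dma_map(my_dev, frag, 0, len, DMA_TO_DEVICE);
        if (dma_mapping_error(my_dev, addr))
            return -ENOMEM;

        /* my_post_tx_buffer() is a placeholder for writing addr/len
         * into the device's TX descriptor ring. */
        my_post_tx_buffer(addr, len);
    }
    return 0;
}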
Example 1: iwl_pcie_gen2_tx_add_frags
static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
                                      struct sk_buff *skb,
                                      struct iwl_tfh_tfd *tfd,
                                      struct iwl_cmd_meta *out_meta)
{
    int i;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        dma_addr_t tb_phys;
        int tb_idx;

        if (!skb_frag_size(frag))
            continue;

        tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
                                   skb_frag_size(frag), DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
            return -ENOMEM;
        tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
                                      skb_frag_size(frag));
        trace_iwlwifi_dev_tx_tb(trans->dev, skb,
                                skb_frag_address(frag),
                                skb_frag_size(frag));
        if (tb_idx < 0)
            return tb_idx;

        out_meta->tbs |= BIT(tb_idx);
    }

    return 0;
}
Example 2: tso_get_fragment
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
                            skb_frag_t *frag)
{
    st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                      skb_frag_size(frag), DMA_TO_DEVICE);
    if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
        st->dma_flags = 0;
        st->unmap_len = skb_frag_size(frag);
        st->in_len = skb_frag_size(frag);
        st->dma_addr = st->unmap_addr;
        return 0;
    }
    return -ENOMEM;
}
Example 3: build_inline_wqe
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
                             const struct sk_buff *skb,
                             const struct skb_shared_info *shinfo,
                             void *fragptr)
{
    struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
    int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl);
    unsigned int hlen = skb_headlen(skb);

    if (skb->len <= spc) {
        if (likely(skb->len >= MIN_PKT_LEN)) {
            inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
        } else {
            inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
            memset(((void *)(inl + 1)) + skb->len, 0,
                   MIN_PKT_LEN - skb->len);
        }
        skb_copy_from_linear_data(skb, inl + 1, hlen);
        if (shinfo->nr_frags)
            memcpy(((void *)(inl + 1)) + hlen, fragptr,
                   skb_frag_size(&shinfo->frags[0]));
    } else {
        inl->byte_count = cpu_to_be32(1 << 31 | spc);
        if (hlen <= spc) {
            skb_copy_from_linear_data(skb, inl + 1, hlen);
            if (hlen < spc) {
                memcpy(((void *)(inl + 1)) + hlen,
                       fragptr, spc - hlen);
                fragptr += spc - hlen;
            }
            inl = (void *) (inl + 1) + spc;
            memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
        } else {
            skb_copy_from_linear_data(skb, inl + 1, spc);
            inl = (void *) (inl + 1) + spc;
            skb_copy_from_linear_data_offset(skb, spc, inl + 1,
                                             hlen - spc);
            if (shinfo->nr_frags)
                memcpy(((void *)(inl + 1)) + hlen - spc,
                       fragptr,
                       skb_frag_size(&shinfo->frags[0]));
        }
        dma_wmb();
        inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
    }
}
Example 4: xen_netbk_count_skb_slots
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
    unsigned int count;
    int i, copy_off;

    count = DIV_ROUND_UP(
            offset_in_page(skb->data) + skb_headlen(skb), PAGE_SIZE);

    copy_off = skb_headlen(skb) % PAGE_SIZE;

    if (skb_shinfo(skb)->gso_size)
        count++;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
        unsigned long bytes;

        while (size > 0) {
            BUG_ON(copy_off > MAX_BUFFER_OFFSET);

            if (start_new_rx_buffer(copy_off, size, 0)) {
                count++;
                copy_off = 0;
            }

            bytes = size;
            if (copy_off + bytes > MAX_BUFFER_OFFSET)
                bytes = MAX_BUFFER_OFFSET - copy_off;

            copy_off += bytes;
            size -= bytes;
        }
    }
    return count;
}
Example 5: prep_msg
static int prep_msg(struct vector_private *vp,
                    struct sk_buff *skb,
                    struct iovec *iov)
{
    int iov_index = 0;
    int nr_frags, frag;
    skb_frag_t *skb_frag;

    nr_frags = skb_shinfo(skb)->nr_frags;
    if (nr_frags > MAX_IOV_SIZE) {
        if (skb_linearize(skb) != 0)
            goto drop;
    }
    if (vp->header_size > 0) {
        iov[iov_index].iov_len = vp->header_size;
        vp->form_header(iov[iov_index].iov_base, skb, vp);
        iov_index++;
    }
    iov[iov_index].iov_base = skb->data;
    if (nr_frags > 0) {
        iov[iov_index].iov_len = skb->len - skb->data_len;
        vp->estats.sg_ok++;
    } else
        iov[iov_index].iov_len = skb->len;
    iov_index++;
    for (frag = 0; frag < nr_frags; frag++) {
        skb_frag = &skb_shinfo(skb)->frags[frag];
        iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
        iov[iov_index].iov_len = skb_frag_size(skb_frag);
        iov_index++;
    }
    return iov_index;
drop:
    return -1;
}
Example 6: init_page_array
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                           struct hv_page_buffer *pb)
{
    u32 slots_used = 0;
    char *data = skb->data;
    int frags = skb_shinfo(skb)->nr_frags;
    int i;

    /* The packet is laid out thus:
     * 1. hdr
     * 2. skb linear data
     * 3. skb fragment data
     */
    if (hdr != NULL)
        slots_used += fill_pg_buf(virt_to_page(hdr),
                                  offset_in_page(hdr),
                                  len, &pb[slots_used]);

    slots_used += fill_pg_buf(virt_to_page(data),
                              offset_in_page(data),
                              skb_headlen(skb), &pb[slots_used]);

    for (i = 0; i < frags; i++) {
        skb_frag_t *frag = skb_shinfo(skb)->frags + i;

        slots_used += fill_pg_buf(skb_frag_page(frag),
                                  frag->page_offset,
                                  skb_frag_size(frag), &pb[slots_used]);
    }
    return slots_used;
}
Example 7: __extend_pgfrags
/**
 * Somewhat like skb_shift().
 *
 * Beware: @from can be equal to MAX_SKB_FRAGS if we need to insert a new
 * fragment after the last one.
 */
static int
__extend_pgfrags(struct sk_buff *skb, struct sk_buff *pskb, int from, int n)
{
    int i, n_frag = 0;
    struct skb_shared_info *psi, *si = skb_shinfo(skb);

    if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS - n) {
        skb_frag_t *f;
        struct sk_buff *skb_frag;

        psi = pskb ? skb_shinfo(pskb) : si;
        skb_frag = psi->frag_list;
        n_frag = skb_shinfo(skb)->nr_frags + n - MAX_SKB_FRAGS;

        if (skb_frag && !skb_headlen(skb_frag)
            && skb_shinfo(skb_frag)->nr_frags <= MAX_SKB_FRAGS - n_frag)
        {
            int r = __extend_pgfrags(skb_frag, NULL, 0, n_frag);
            if (r)
                return r;
        } else {
            skb_frag = alloc_skb(0, GFP_ATOMIC);
            if (!skb_frag)
                return -ENOMEM;
            skb_frag->next = psi->frag_list;
            psi->frag_list = skb_frag;
        }

        for (i = n_frag - 1;
             i >= 0 && MAX_SKB_FRAGS - n + i >= from; --i)
        {
            f = &si->frags[MAX_SKB_FRAGS - n + i];
            skb_shinfo(skb_frag)->frags[i] = *f;
            ss_skb_adjust_data_len(skb, -skb_frag_size(f));
            ss_skb_adjust_data_len(skb_frag, skb_frag_size(f));
        }
        skb_shinfo(skb_frag)->nr_frags += n_frag;
        skb->ip_summed = CHECKSUM_PARTIAL;
        skb_frag->ip_summed = CHECKSUM_PARTIAL;
    }

    memmove(&si->frags[from + n], &si->frags[from],
            (si->nr_frags - from - n_frag) * sizeof(skb_frag_t));
    si->nr_frags += n - n_frag;

    return 0;
}
Example 8: __extend_pgfrags
/**
 * Somewhat like skb_shift().
 * Make room for @n fragments starting with slot @from.
 *
 * Beware: @from can be equal to MAX_SKB_FRAGS when a new fragment
 * is inserted after the last one.
 *
 * @return 0 on success, -errno on failure.
 * @return New SKB in @it->skb if new SKB is allocated.
 */
static int
__extend_pgfrags(struct sk_buff *skb, int from, int n, TfwStr *it)
{
    int i, n_shift, n_excess = 0;
    struct skb_shared_info *si = skb_shinfo(skb);

    BUG_ON(from > si->nr_frags);

    /* No room for @n extra page fragments in the SKB. */
    if (si->nr_frags + n > MAX_SKB_FRAGS) {
        skb_frag_t *f;
        struct sk_buff *nskb;

        /* Allocate a new SKB to hold @n_excess page fragments. */
        nskb = alloc_skb(0, GFP_ATOMIC);
        if (nskb == NULL)
            return -ENOMEM;

        /*
         * The number of page fragments that don't fit in the SKB
         * after the room is prepared for @n page fragments.
         */
        n_excess = si->nr_frags + n - MAX_SKB_FRAGS;

        /* Shift @n_excess number of page fragments to new SKB. */
        if (from < si->nr_frags) {
            for (i = n_excess - 1; i >= 0; --i) {
                f = &si->frags[MAX_SKB_FRAGS - n + i];
                skb_shinfo(nskb)->frags[i] = *f;
                ss_skb_adjust_data_len(skb, -skb_frag_size(f));
                ss_skb_adjust_data_len(nskb, skb_frag_size(f));
            }
        }
        skb_shinfo(nskb)->nr_frags += n_excess;
        it->skb = nskb;
    }

    /* Make room for @n page fragments in the SKB. */
    n_shift = si->nr_frags - from - n_excess;
    BUG_ON(n_shift < 0);
    if (n_shift)
        memmove(&si->frags[from + n],
                &si->frags[from], n_shift * sizeof(skb_frag_t));
    si->nr_frags += n - n_excess;

    return 0;
}
Example 9: netbk_gop_skb
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
                         struct netrx_pending_operations *npo)
{
    struct xenvif *vif = netdev_priv(skb->dev);
    int nr_frags = skb_shinfo(skb)->nr_frags;
    int i;
    struct xen_netif_rx_request *req;
    struct netbk_rx_meta *meta;
    unsigned char *data;
    int head = 1;
    int old_meta_prod;

    old_meta_prod = npo->meta_prod;

    /* Set up a GSO prefix descriptor, if necessary */
    if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;
        meta->gso_size = skb_shinfo(skb)->gso_size;
        meta->size = 0;
        meta->id = req->id;
    }

    req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    meta = npo->meta + npo->meta_prod++;

    if (!vif->gso_prefix)
        meta->gso_size = skb_shinfo(skb)->gso_size;
    else
        meta->gso_size = 0;

    meta->size = 0;
    meta->id = req->id;
    npo->copy_off = 0;
    npo->copy_gref = req->gref;

    data = skb->data;
    while (data < skb_tail_pointer(skb)) {
        unsigned int offset = offset_in_page(data);
        unsigned int len = PAGE_SIZE - offset;

        if (data + len > skb_tail_pointer(skb))
            len = skb_tail_pointer(skb) - data;

        netbk_gop_frag_copy(vif, skb, npo,
                            virt_to_page(data), len, offset, &head);
        data += len;
    }

    for (i = 0; i < nr_frags; i++) {
        netbk_gop_frag_copy(vif, skb, npo,
                            skb_frag_page(&skb_shinfo(skb)->frags[i]),
                            skb_frag_size(&skb_shinfo(skb)->frags[i]),
                            skb_shinfo(skb)->frags[i].page_offset,
                            &head);
    }

    return npo->meta_prod - old_meta_prod;
}
Example 10: greth_clean_tx_gbit
static void greth_clean_tx_gbit(struct net_device *dev)
{
    struct greth_private *greth;
    struct greth_bd *bdp, *bdp_last_frag;
    struct sk_buff *skb;
    u32 stat;
    int nr_frags, i;

    greth = netdev_priv(dev);

    while (greth->tx_free < GRETH_TXBD_NUM) {
        skb = greth->tx_skbuff[greth->tx_last];
        nr_frags = skb_shinfo(skb)->nr_frags;

        /* We only clean fully completed SKBs */
        bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);

        GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
        mb();
        stat = greth_read_bd(&bdp_last_frag->stat);

        if (stat & GRETH_BD_EN)
            break;

        greth->tx_skbuff[greth->tx_last] = NULL;

        greth_update_tx_stats(dev, stat);
        dev->stats.tx_bytes += skb->len;

        bdp = greth->tx_bd_base + greth->tx_last;

        greth->tx_last = NEXT_TX(greth->tx_last);

        dma_unmap_single(greth->dev,
                         greth_read_bd(&bdp->addr),
                         skb_headlen(skb),
                         DMA_TO_DEVICE);

        for (i = 0; i < nr_frags; i++) {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            bdp = greth->tx_bd_base + greth->tx_last;

            dma_unmap_page(greth->dev,
                           greth_read_bd(&bdp->addr),
                           skb_frag_size(frag),
                           DMA_TO_DEVICE);

            greth->tx_last = NEXT_TX(greth->tx_last);
        }
        greth->tx_free += nr_frags + 1;
        dev_kfree_skb(skb);
    }

    if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS + 1)))
        netif_wake_queue(dev);
}
Example 11: snapshot_record
void snapshot_record(struct pkt_snapshot *shot, struct sk_buff *skb)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    unsigned int limit;
    unsigned int i;

    shot->len = skb->len;
    shot->data_len = skb->data_len;
    shot->nr_frags = shinfo->nr_frags;
    limit = SIMPLE_MIN(SNAPSHOT_FRAGS_SIZE, shot->nr_frags);
    for (i = 0; i < limit; i++)
        shot->frags[i] = skb_frag_size(&shinfo->frags[i]);

    /*
     * Ok so I only have room for SNAPSHOT_FRAGS_SIZE page sizes, unless I
     * allocate. I don't want to allocate because that's an additional fail
     * opportunity and I want this to be as unintrusive as possible.
     *
     * First of all, since PAGE_SIZE is 4k in my VM, and the typical
     * Internet MTU is 1500 max, I don't think the packet is going
     * to have more than one page.
     *
     * (Unless IP fragments are being treated as pages, but I don't think
     * that's the case here because the crashing packet was an ICMP error,
     * and defrag discards fragmented ICMP errors on reception because they
     * are BS.)
     *
     * Second, even if we get multiple pages, I don't see why they would
     * have different sizes. Except for the last one, that is.
     *
     * (Unless the crashing pages were IP fragments. Again, I don't think
     * this is the case.)
     *
     * Therefore, if the packet has more than SNAPSHOT_FRAGS_SIZE pages,
     * I'm going to risk it and override the last slottable page size with
     * the most interesting one. (The last one.)
     *
     * Consider that when you're reading the output.
     */
    if (shot->nr_frags > SNAPSHOT_FRAGS_SIZE) {
        shot->frags[SNAPSHOT_FRAGS_SIZE - 1]
                = skb_frag_size(&shinfo->frags[shot->nr_frags - 1]);
    }
}
Example 12: map_skb
static int map_skb(struct device *dev, const struct sk_buff *skb,
                   struct mpodp_tx *tx)
{
    const skb_frag_t *fp, *end;
    const struct skb_shared_info *si;
    int count = 1;
    dma_addr_t handler;

    sg_init_table(tx->sg, MAX_SKB_FRAGS + 1);

    handler = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handler))
        goto out_err;
    sg_dma_address(&tx->sg[0]) = handler;
    sg_dma_len(&tx->sg[0]) = skb_headlen(skb);

    si = skb_shinfo(skb);
    end = &si->frags[si->nr_frags];
    for (fp = si->frags; fp < end; fp++, count++) {
        handler = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handler))
            goto unwind;
        sg_dma_address(&tx->sg[count]) = handler;
        sg_dma_len(&tx->sg[count]) = skb_frag_size(fp);
    }
    sg_mark_end(&tx->sg[count - 1]);
    tx->sg_len = count;

    return 0;

unwind:
    while (fp-- > si->frags)
        dma_unmap_page(dev, sg_dma_address(&tx->sg[--count]),
                       skb_frag_size(fp), DMA_TO_DEVICE);
    dma_unmap_single(dev, sg_dma_address(&tx->sg[0]),
                     skb_headlen(skb), DMA_TO_DEVICE);
out_err:
    return -ENOMEM;
}
Example 13: build_inline_wqe
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
                             int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
    struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
    int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

    if (skb->len <= spc) {
        inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
        skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
        if (skb_shinfo(skb)->nr_frags)
            memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
                   skb_frag_size(&skb_shinfo(skb)->frags[0]));
    } else {
        inl->byte_count = cpu_to_be32(1 << 31 | spc);
        if (skb_headlen(skb) <= spc) {
            skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
            if (skb_headlen(skb) < spc) {
                memcpy(((void *)(inl + 1)) + skb_headlen(skb),
                       fragptr, spc - skb_headlen(skb));
                fragptr += spc - skb_headlen(skb);
            }
            inl = (void *) (inl + 1) + spc;
            memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
        } else {
            skb_copy_from_linear_data(skb, inl + 1, spc);
            inl = (void *) (inl + 1) + spc;
            skb_copy_from_linear_data_offset(skb, spc, inl + 1,
                                             skb_headlen(skb) - spc);
            if (skb_shinfo(skb)->nr_frags)
                memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
                       fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
        }
        wmb();
        inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
    }
    tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
    tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
            (!!vlan_tx_tag_present(skb));
    tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
Example 14: unmap_skb
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
                      const struct mpodp_tx *tx)
{
    const skb_frag_t *fp, *end;
    const struct skb_shared_info *si;
    int count = 1;

    dma_unmap_single(dev, sg_dma_address(&tx->sg[0]), skb_headlen(skb), DMA_TO_DEVICE);

    si = skb_shinfo(skb);
    end = &si->frags[si->nr_frags];
    for (fp = si->frags; fp < end; fp++, count++) {
        dma_unmap_page(dev, sg_dma_address(&tx->sg[count]), skb_frag_size(fp), DMA_TO_DEVICE);
    }
}
Example 15: count_skb_frag_slots
static int count_skb_frag_slots(struct sk_buff *skb)
{
    int i, frags = skb_shinfo(skb)->nr_frags;
    int pages = 0;

    for (i = 0; i < frags; i++) {
        skb_frag_t *frag = skb_shinfo(skb)->frags + i;
        unsigned long size = skb_frag_size(frag);
        unsigned long offset = frag->page_offset;

        /* Skip unused frames from start of page */
        offset &= ~PAGE_MASK;

        pages += PFN_UP(offset + size);
    }
    return pages;
}