本文整理汇总了C++中rte_pktmbuf_free函数的典型用法代码示例。如果您正苦于以下问题:C++ rte_pktmbuf_free函数的具体用法?C++ rte_pktmbuf_free怎么用?C++ rte_pktmbuf_free使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了rte_pktmbuf_free函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: vmxnet3_tq_tx_complete
/*
 * Reclaim completed TX descriptors from the vmxnet3 completion ring and
 * free the mbufs that backed them.  Walks the completion ring while its
 * generation bit matches the ring's current generation (i.e. while there
 * are unprocessed completion entries).
 */
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
int completed = 0;
struct rte_mbuf *mbuf;
vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
/* First unprocessed completion descriptor. */
struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
(comp_ring->base + comp_ring->next2proc);
/* gen bit equal to ring gen means the device has written this entry. */
while (tcd->gen == comp_ring->gen) {
/* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
/* txdIdx points back at the command-ring slot this completion is for. */
mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
if (unlikely(mbuf == NULL))
rte_panic("EOP desc does not point to a valid mbuf");
else
rte_pktmbuf_free(mbuf);
/* Clear the stashed mbuf pointer so it cannot be freed twice. */
txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
/* Mark the txd for which tcd was generated as completed */
vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
/* Advance to the next completion entry (flips gen on wrap). */
vmxnet3_comp_ring_adv_next2proc(comp_ring);
tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
comp_ring->next2proc);
completed++;
}
PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
示例2: handle_work_with_free_mbufs
/* to test that the distributor does not lose packets, we use this worker
* function which frees mbufs when it gets them. The distributor thread does
* the mbuf allocation. If distributor drops packets we'll eventually run out
* of mbufs.
*/
/* to test that the distributor does not lose packets, we use this worker
 * function which frees mbufs when it gets them. The distributor thread does
 * the mbuf allocation. If distributor drops packets we'll eventually run out
 * of mbufs.
 *
 * Fix: removed the local `count` accumulator, which was written on every
 * iteration but never read or returned (dead local).
 */
static int
handle_work_with_free_mbufs(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int i;
	unsigned int num = 0;
	/* Claim a unique worker id atomically. */
	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);

	for (i = 0; i < 8; i++)
		buf[i] = NULL;

	num = rte_distributor_get_pkt(d, id, buf, buf, num);
	while (!quit) {
		worker_stats[id].handled_packets += num;
		/* Free everything we were handed; the distributor thread
		 * allocates, so leaked packets would drain the pool. */
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d,
				id, buf, buf, num);
	}
	worker_stats[id].handled_packets += num;
	/* Hand the final batch back instead of freeing it. */
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}
示例3: send_burst_nodrop
/*
 * Flush the writer's TX buffer to the file descriptor, retrying failed
 * writes up to p->n_retries times in total across the whole burst.
 * Packets that could not be written once retries are exhausted are
 * counted as dropped.  All mbufs are freed here regardless of outcome:
 * write() copies the data, so ownership never transfers.
 *
 * Fix: the retry loop used "if (ret) break;", which treated write()'s
 * -1 error return exactly like a successful write and stopped retrying.
 * Only a positive return (bytes actually written) ends the retry loop.
 */
static inline void
send_burst_nodrop(struct rte_port_fd_writer_nodrop *p)
{
	uint64_t n_retries = 0;
	uint32_t i;

	for (i = 0; (i < p->tx_buf_count) && (n_retries < p->n_retries); i++) {
		struct rte_mbuf *pkt = p->tx_buf[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void *);
		size_t n_bytes = rte_pktmbuf_data_len(pkt);

		for ( ; n_retries < p->n_retries; n_retries++) {
			ssize_t ret;

			ret = write(p->fd, pkt_data, n_bytes);
			if (ret > 0)
				break;
		}
	}

	/* Everything past index i never made it out. */
	RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - i);

	for (i = 0; i < p->tx_buf_count; i++)
		rte_pktmbuf_free(p->tx_buf[i]);
	p->tx_buf_count = 0;
}
示例4: rte_port_fd_reader_rx
/*
 * RX handler for the fd reader port: bulk-allocate n_pkts mbufs, then
 * fill them one read() at a time until the fd stops yielding data.
 * Unused mbufs are returned to the pool; the number of packets actually
 * received is returned.
 */
static int
rte_port_fd_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_fd_reader *p = port;
	uint32_t n_rx, k;

	if (rte_pktmbuf_alloc_bulk(p->mempool, pkts, n_pkts) != 0)
		return 0;

	for (n_rx = 0; n_rx < n_pkts; n_rx++) {
		struct rte_mbuf *m = pkts[n_rx];
		ssize_t n_bytes = read(p->fd, rte_pktmbuf_mtod(m, void *),
			(size_t) p->mtu);

		/* EOF or error: stop filling. */
		if (n_bytes <= 0)
			break;

		m->data_len = n_bytes;
		m->pkt_len = n_bytes;
	}

	/* Give back the mbufs that never received data. */
	for (k = n_rx; k < n_pkts; k++)
		rte_pktmbuf_free(pkts[k]);

	RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(p, n_rx);
	return n_rx;
}
示例5: send_paxos_message
/*
 * Build a Paxos UDP packet from *pm and transmit it on port 0.
 *
 * Fixes:
 *  - rte_pktmbuf_alloc() result was dereferenced without a NULL check;
 *    an exhausted mempool crashed the process.
 *  - the "px->inst = ..." assignment was duplicated.
 *  - the mbuf was freed unconditionally after rte_eth_tx_burst().  On a
 *    successful send the driver takes ownership and frees the mbuf
 *    itself, so the old code double-freed it; we now free only when the
 *    burst did not accept the packet.
 */
static void
send_paxos_message(paxos_message *pm) {
	uint8_t port_id = 0;
	struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);
	if (created_pkt == NULL) {
		rte_log(RTE_LOG_ERR, RTE_LOGTYPE_USER8,
			"Failed to allocate mbuf for paxos message\n");
		return;
	}
	created_pkt->l2_len = sizeof(struct ether_hdr);
	created_pkt->l3_len = sizeof(struct ipv4_hdr);
	created_pkt->l4_len = sizeof(struct udp_hdr) + sizeof(paxos_message);
	craft_new_packet(&created_pkt, IPv4(192,168,4,99), ACCEPTOR_ADDR,
			PROPOSER_PORT, ACCEPTOR_PORT, sizeof(paxos_message), port_id);
	size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
	size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
	/* Fill the Paxos header in network byte order. */
	struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt,
			struct paxos_hdr *, paxos_offset);
	px->msgtype = rte_cpu_to_be_16(pm->type);
	px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
	px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
	px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
	px->acptid = 0;
	rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val,
			pm->u.accept.value.paxos_value_len);
	/* Offload IP and UDP checksum computation to the NIC. */
	created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
	const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);
	if (nb_tx == 0)
		rte_pktmbuf_free(created_pkt);
	rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Send %d messages\n", nb_tx);
}
示例6: unlink_vmdq
/*
* Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
* queue before disabling RX on the device.
*/
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct virtio_net *dev)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned int n;
	unsigned int rx_count;

	if (dev->ready != DEVICE_READY)
		return;

	/* Clear MAC and VLAN settings. */
	rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
	for (n = 0; n < 6; n++)
		dev->mac_address.addr_bytes[n] = 0;
	dev->vlan_tag = 0;

	/* Drain and free anything still queued on the device's RX queue. */
	do {
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		for (n = 0; n < rx_count; n++)
			rte_pktmbuf_free(pkts_burst[n]);
	} while (rx_count != 0);

	dev->ready = DEVICE_NOT_READY;
}
示例7: send_burst_nodrop
/*
 * Flush the writer's TX buffer to the ethdev queue.  One initial burst,
 * then up to p->n_retries follow-up bursts for whatever was not accepted;
 * anything still unsent after that is counted as dropped and freed.
 */
static inline void
send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
{
	uint32_t sent, attempt;

	sent = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
		p->tx_buf_count);
	/* Fast path: everything went out on the first try. */
	if (sent >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (attempt = 0; attempt < p->n_retries; attempt++) {
		sent += rte_eth_tx_burst(p->port_id, p->queue_id,
			p->tx_buf + sent, p->tx_buf_count - sent);
		/* Done once the retries have pushed out the remainder. */
		if (sent >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* Retries exhausted: account for and free the leftovers. */
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - sent);
	for ( ; sent < p->tx_buf_count; sent++)
		rte_pktmbuf_free(p->tx_buf[sent]);
	p->tx_buf_count = 0;
}
示例8: rte_lcore_id
/**
 * @brief RX routine
 *
 * Polls every RX port assigned to the current lcore, optionally records
 * the received packets into the capture buffer, and always frees the
 * burst afterwards (saveToBuf does not take ownership of the mbufs —
 * NOTE(review): inferred from the unconditional free below; confirm).
 */
void DPDKAdapter::rxRoutine()
{
    uint8_t pkt = 0;
    uint8_t rxPktCount = 0;
    uint8_t devId = 0;
    uint8_t lcoreId = rte_lcore_id();
    LcoreInfo& coreInfo = cores[lcoreId];
    for(PortList_t::iterator itor = coreInfo.rxPortList.begin(); itor != coreInfo.rxPortList.end(); itor++)
    {
        devId = *itor;
        DeviceInfo& devInfo = devices[devId];
        struct rte_eth_dev *dev = &rte_eth_devices[devId];
        /* NOTE(review): &array[i] can never be NULL, so the !dev test is
         * always false; only dev_started actually gates the skip. */
        if(!dev || !dev->data->dev_started)
        {
            continue;
        }
        /* Pull up to DPDK_RX_MAX_PKT_BURST packets from queue 0. */
        rxPktCount = rte_eth_rx_burst(devId, 0, devInfo.rxBurstBuf, DPDK_RX_MAX_PKT_BURST);
        /* Record the burst only while capture is active for this port. */
        if(isRxStarted(devId))
        {
            saveToBuf(devId, devInfo.rxBurstBuf, rxPktCount);
        }
        for(pkt = 0; pkt < rxPktCount; pkt++)
        {
            rte_pktmbuf_free(devInfo.rxBurstBuf[pkt]);
        }
    }
}
示例9: qDebug
/**
* @brief Copy all mbuf segments to a buffer
*
* @param devId port number
* @param pMbuf mbuf
* @param data Data buffer
* @param dataLen Data buffer length
*
* @return true on success
*/
bool DPDKAdapter::copyMbufToBuf(uint8_t devId, MBuf_t* pMbuf, char* data, unsigned int& dataLen)
{
qDebug("pkt_len %u, data_len %u, nb_segs %u", pMbuf->pkt.pkt_len, pMbuf->pkt.data_len, pMbuf->pkt.nb_segs);
unsigned int segCnt = pMbuf->pkt.nb_segs;
unsigned int offset = 0;
MBuf_t* pNextMbuf = pMbuf;
dataLen = pMbuf->pkt.pkt_len;
while (segCnt > 0)
{
MBuf_t* pCurMbuf = pNextMbuf;
qDebug("segCnt %u, offset %u", segCnt, offset);
rte_memcpy(data + offset, pCurMbuf->pkt.data, pCurMbuf->pkt.data_len);
qDebug("pkt_len %u, data_len %u", pCurMbuf->pkt.pkt_len, pCurMbuf->pkt.data_len);
offset += pCurMbuf->pkt.data_len;
pNextMbuf = pCurMbuf->pkt.next;
rte_pktmbuf_free(pCurMbuf);
segCnt--;
}
return true;
}
示例10: pcap_next_ex
/*
 * pcap_next_ex() shim over DPDK: busy-poll queue 0 of the device until a
 * packet arrives, copy it into the static data_g buffer, and hand back
 * pointers to static header/data storage (not thread-safe, overwritten
 * on the next call).
 *
 * Fixes:
 *  - valid port ids are 0 .. RTE_MAX_ETHPORTS-1, so the bounds check
 *    must reject deviceId == RTE_MAX_ETHPORTS (was ">", off by one).
 *  - pktHeader_g.caplen was never set, leaving a stale value from a
 *    previous call; we capture the full packet, so caplen == len.
 *
 * NOTE(review): rte_memcpy copies pkt_len bytes from the FIRST segment
 * only and data_g's size is not checked here — multi-segment or oversized
 * packets would overrun; confirm upstream guarantees single-segment mbufs
 * that fit data_g.
 */
int pcap_next_ex(pcap_t *p, struct pcap_pkthdr **pkt_header,
                 const u_char **pkt_data)
{
    struct rte_mbuf* mbuf = NULL;
    int len = 0;

    if (p == NULL || pkt_header == NULL || pkt_data == NULL ||
        p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS)
    {
        snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    debug("Receiving a packet on port %d\n", p->deviceId);

    /* Busy-wait until exactly one packet is received. */
    while (!rte_eth_rx_burst(p->deviceId, 0, &mbuf, 1))
    {
    }

    len = rte_pktmbuf_pkt_len(mbuf);
    pktHeader_g.len = len;
    pktHeader_g.caplen = len;
    *pkt_header = &pktHeader_g;

    rte_memcpy((void*)data_g, rte_pktmbuf_mtod(mbuf, void*), len);
    *pkt_data = data_g;

    rte_pktmbuf_free(mbuf);
    return 1;
}
示例11: kni_allocate_mbufs
/*
 * Refill the KNI alloc_q with fresh mbufs so the kernel side always has
 * buffers to receive into.  Allocates up to MAX_MBUF_BURST_NUM mbufs,
 * pushes them onto the FIFO, and frees any that did not fit.
 */
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	int allocated, put;

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}

	for (allocated = 0; allocated < MAX_MBUF_BURST_NUM; allocated++) {
		pkts[allocated] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[allocated] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
	}

	/* Nothing allocated: nothing to enqueue. */
	if (allocated <= 0)
		return;

	put = kni_fifo_put(kni->alloc_q, (void **)pkts, allocated);

	/* Free whatever the FIFO did not accept. */
	if (put >= 0 && put < allocated && put < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = put; j < allocated; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
示例12: __attribute__
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
	const uint8_t port = 0;

	/* Warn when the NIC and the polling lcore sit on different NUMA nodes. */
	if (rte_eth_dev_socket_id(port) > 0 &&
			rte_eth_dev_socket_id(port) != (int)rte_socket_id())
		printf("WARNING, port %u is on remote NUMA node to "
			"polling thread.\n\tPerformance will "
			"not be optimal.\n", port);

	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
		rte_lcore_id());

	for (;;) {
		struct rte_mbuf *rx_pkts[BURST_SIZE];
		const uint16_t n_rx = rte_eth_rx_burst(port, 0,
			rx_pkts, BURST_SIZE);
		uint16_t k;

		if (unlikely(n_rx == 0))
			continue;

		/* Dump each received packet to stdout, then release it. */
		for (k = 0; k < n_rx; k++) {
			struct rte_mbuf *m = rx_pkts[k];

			rte_pktmbuf_dump(stdout, m, rte_pktmbuf_data_len(m));
			rte_pktmbuf_free(m);
		}
	}
}
示例13: set_mempool
/*
 * Install `mempool` as the packet-frame pool and capture an mbuf template
 * from it.  In PER_CORE builds the pool/template are stored per lcore; in
 * per-socket builds each NUMA socket is initialized exactly once.
 *
 * NOTE(review): braces below open and close inside different #if branches
 * (the "{" of the initialized-check at the top is closed near the bottom
 * under the same #if) — edit with care.
 */
static void set_mempool(struct rte_mempool *mempool) {
#if (!PER_CORE)
int initialized[RTE_MAX_NUMA_NODES];
for (int i = 0; i < RTE_MAX_NUMA_NODES; i++) {
initialized[i] = 0;
}
#endif
if (mempool == NULL) {
rte_panic("Got a NULL mempool");
}
/* Loop through all cores, to see if any of them belong to this
 * socket. */
for (int i = 0; i < RTE_MAX_LCORE; i++) {
int sid = rte_lcore_to_socket_id(i);
#if (!PER_CORE)
/* Only the first lcore seen on each socket initializes it. */
if (!initialized[sid]) {
#endif
struct rte_mbuf *mbuf = NULL;
#if (PER_CORE)
pframe_pool[i] = mempool;
#else
pframe_pool[sid] = mempool;
#endif
/* Initialize mbuf template */
#if PER_CORE
mbuf = rte_pktmbuf_alloc(pframe_pool[i]);
if (mbuf == NULL) {
rte_panic("Bad mbuf");
}
/* Copy the freshly reset mbuf by value as a template, then return it. */
mbuf_template[i] = *mbuf;
rte_pktmbuf_free(mbuf);
#else
mbuf = rte_pktmbuf_alloc(pframe_pool[sid]);
/* Sanity-check the pool before trusting it as a template source. */
if (mbuf == NULL ||
mbuf->next != NULL ||
mbuf->pool == NULL) {
rte_panic("Bad mbuf");
}
mbuf_template[sid] = *mbuf;
rte_pktmbuf_free(mbuf);
#endif
#if (!PER_CORE)
initialized[sid] = 1;
}
#endif
}
}
示例14: rte_pktmbuf_free
void CGenNodeStateless::free_stl_node(){
/* if we have cache mbuf free it */
rte_mbuf_t * m=get_cache_mbuf();
if (m) {
rte_pktmbuf_free(m);
m_cache_mbuf=0;
}
}
示例15: pktmbuf_free_bulk
/* Free every mbuf in a table of n entries. */
static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned n)
{
	struct rte_mbuf **const end = mbuf_table + n;
	struct rte_mbuf **cur;

	for (cur = mbuf_table; cur != end; cur++)
		rte_pktmbuf_free(*cur);
}