This article collects typical usage examples of the C/C++ function rte_mempool_create. If you have been wondering what rte_mempool_create does and how it is used in practice, the hand-picked examples below should help.
The following 15 code examples of rte_mempool_create are presented, ordered by popularity.
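Before the project examples, here is a minimal sketch of the call pattern most of them follow: building a packet-mbuf pool with the legacy rte_mempool_create API. The pool name and the EXAMPLE_* sizes below are illustrative assumptions, not values taken from any of the projects. On failure the call returns NULL and sets rte_errno, which is why every example checks the return value.
#include <rte_config.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_lcore.h>

/* Illustrative pool geometry (assumed values, not from the examples below). */
#define EXAMPLE_NB_MBUF   8192
#define EXAMPLE_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define EXAMPLE_CACHE_SZ  256

static struct rte_mempool *
example_create_pktmbuf_pool(void)
{
    /* private_data_size reserves room for struct rte_pktmbuf_pool_private,
     * rte_pktmbuf_pool_init fills that private area in, and rte_pktmbuf_init
     * runs once per object to initialize each mbuf. rte_socket_id() places
     * the pool on the caller's NUMA node; flags = 0 keeps the default
     * multi-producer/multi-consumer behavior. */
    return rte_mempool_create("example_mbuf_pool",
                              EXAMPLE_NB_MBUF,
                              EXAMPLE_MBUF_SIZE,
                              EXAMPLE_CACHE_SZ,
                              sizeof(struct rte_pktmbuf_pool_private),
                              rte_pktmbuf_pool_init, NULL,
                              rte_pktmbuf_init, NULL,
                              rte_socket_id(), 0);
}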
Example 1: globalinit
static int
globalinit(struct virtif_user *viu)
{
int rv;
if ((rv = rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]),
/*UNCONST*/(void *)(uintptr_t)ealargs)) < 0)
OUT("eal init");
if ((mbpool_tx = rte_mempool_create("mbuf_pool_tx", NMBUF_TX, MBSIZE, 0/*MBCACHE*/,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
rv = -EINVAL;
OUT("mbuf pool tx");
}
if ((mbpool_rx = rte_mempool_create("mbuf_pool_rx", NMBUF_RX, MBSIZE, 0/*MBCACHE*/,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
rv = -EINVAL;
OUT("mbuf pool tx");
}
if (rte_eth_dev_count() == 0) {
rv = -1;
OUT("no ports");
}
rv = 0;
out:
return rv;
}
Example 2: app_init_mbuf_pools
static void
app_init_mbuf_pools(void)
{
/* Init the buffer pool */
RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
app.pool = rte_mempool_create(
"mempool",
app.pool_size,
app.pool_buffer_size,
app.pool_cache_size,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(),
0);
if (app.pool == NULL)
rte_panic("Cannot create mbuf pool\n");
/* Init the indirect buffer pool */
RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
app.indirect_pool = rte_mempool_create(
"indirect mempool",
app.pool_size,
sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
app.pool_cache_size,
0,
NULL, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(),
0);
if (app.indirect_pool == NULL)
rte_panic("Cannot create mbuf pool\n");
/* Init the message buffer pool */
RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
app.msg_pool = rte_mempool_create(
"mempool msg",
app.msg_pool_size,
app.msg_pool_buffer_size,
app.msg_pool_cache_size,
0,
NULL, NULL,
rte_ctrlmbuf_init, NULL,
rte_socket_id(),
0);
if (app.msg_pool == NULL)
rte_panic("Cannot create message pool\n");
}
Example 3: globalinit
static int
globalinit(void)
{
int rv = -1;	/* any early OUT() below returns an error */
if (rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]),
/*UNCONST*/(void *)(uintptr_t)ealargs) < 0)
OUT("eal init\n");
if ((mbpool = rte_mempool_create("mbuf_pool", NMBUF, MBSIZE, MBALIGN,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, 0, 0)) == NULL)
OUT("mbuf pool\n");
if (PMD_INIT() < 0)
OUT("wm driver\n");
if (rte_eal_pci_probe() < 0)
OUT("PCI probe\n");
if (rte_eth_dev_count() == 0)
OUT("no ports\n");
rv = 0;
out:
return rv;
}
Example 4: spdk_nvmf_initialize_pools
static int
spdk_nvmf_initialize_pools(void)
{
SPDK_NOTICELOG("\n*** NVMf Pool Creation ***\n");
g_num_requests = MAX_SUBSYSTEMS * g_nvmf_tgt.MaxConnectionsPerSession * g_nvmf_tgt.MaxQueueDepth;
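/* Size the pool so that every queue entry of every connection of every subsystem
 * can have an outstanding request object at the same time. */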
/* create NVMe backend request pool */
request_mempool = rte_mempool_create("NVMe_Pool",
g_num_requests,
spdk_nvme_request_size(),
128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (!request_mempool) {
SPDK_ERRLOG("create NVMe request pool failed\n");
return -1;
}
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "NVMe request_mempool %p, size %" PRIu64 " bytes\n",
request_mempool,
(uint64_t)g_num_requests * spdk_nvme_request_size());
return 0;
}
Example 5: rte_mempool_lookup
/**
* @brief Initialize MBuf pool for device
*
* @param name const char*, name of MemPool object
*
* @return true on success and false otherwise
*/
bool DPDKAdapter::initDevMBufPool(const char* name)
{
if(!name)
return false;
// Don't create MemPool if it already exists
MemPool_t* pool = rte_mempool_lookup(name);
if(pool)
return true;
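/* MEMPOOL_F_SP_PUT / MEMPOOL_F_SC_GET drop the per-put/per-get synchronization;
 * this is only safe if a single thread ever puts to and a single thread ever
 * gets from this pool. */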
pool = rte_mempool_create(name,
DPDK_MEMPOOL_SIZE,
MBUF_SIZE,
DPDK_MEMPOOL_CACHE_SIZE,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
SOCKET_ID_ANY,
MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
if(pool == NULL)
{
qCritical("Can not init memory pool");
return false;
}
if(rte_mempool_lookup(name) != pool)
{
qCritical("Can not lookup memory pool by its name");
return false;
}
return true;
}
Example 6: setup_mempools
static void setup_mempools(struct lcore_cfg* lcore_cfg)
{
char name[64];
struct lcore_cfg *lconf = 0;
for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
continue;
}
lconf = &lcore_cfg[lcore_id];
uint8_t socket = rte_lcore_to_socket_id(lcore_id);
for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];
if (startup_cfg->rx_port != NO_PORT_AVAIL) {
/* allocate memory pool for packets */
if (startup_cfg->nb_mbuf == 0) {
startup_cfg->nb_mbuf = tgen_cfg.nb_mbuf;
}
/* use this pool for the interface that the core is receiving from */
sprintf(name, "core_%u_port_%u_pool", lcore_id, task_id);
startup_cfg->pool = rte_mempool_create(name,
startup_cfg->nb_mbuf - 1, MBUF_SIZE,
MAX_PKT_BURST * 4,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
tgen_pktmbuf_init, lconf,
socket, 0);
TGEN_PANIC(startup_cfg->pool == NULL, "\t\tError: cannot create mempool for core %u port %u\n", lcore_id, task_id);
mprintf("\t\tMempool %p size = %u * %u cache %u, socket %d\n", startup_cfg->pool,
startup_cfg->nb_mbuf, MBUF_SIZE, MAX_PKT_BURST * 4, socket);
}
}
}
}
Example 7: init_mbuf_pools
/**
* Initialise the mbuf pool for packet reception for the NIC, and any other
* buffer pools needed by the app - currently none.
*/
static int
init_mbuf_pools(void)
{
const unsigned num_mbufs = (num_rings * MBUFS_PER_RING);
/* don't pass single-producer/single-consumer flags to mbuf create as it
* seems faster to use a cache instead */
printf("Creating mbuf pool '%s' [%u mbufs] ...\n",
HSM_POOL_NAME, num_mbufs);
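/* In a secondary process the hugepage memory is shared with the primary, so the
 * pool is looked up by name rather than created again. */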
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
{
pktmbuf_pool = rte_mempool_lookup(HSM_POOL_NAME);
if (pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
}
else
{
pktmbuf_pool = rte_mempool_create(HSM_POOL_NAME, num_mbufs,
MBUF_SIZE, MBUF_CACHE_SIZE,
sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
NULL, rte_pktmbuf_init, NULL, rte_socket_id(), NO_FLAGS );
}
return (pktmbuf_pool == NULL); /* 0 on success */
}
Example 8: pktgen_mbuf_pool_create
static struct rte_mempool *
pktgen_mbuf_pool_create(const char * type, uint8_t pid, uint8_t queue_id,
uint32_t nb_mbufs, int socket_id, int cache_size )
{
struct rte_mempool * mp;
char name[RTE_MEMZONE_NAMESIZE];
snprintf(name, sizeof(name), "%-12s%u:%u", type, pid, queue_id);
pktgen_log_info(" Create: %-*s - Memory used (MBUFs %4u x (size %u + Hdr %lu)) + %lu = %6lu KB",
16, name, nb_mbufs, MBUF_SIZE, sizeof(struct rte_mbuf), sizeof(struct rte_mempool),
(((nb_mbufs * (MBUF_SIZE + sizeof(struct rte_mbuf)) + sizeof(struct rte_mempool))) + 1023)/1024);
pktgen.mem_used += ((nb_mbufs * (MBUF_SIZE + sizeof(struct rte_mbuf)) + sizeof(struct rte_mempool)));
pktgen.total_mem_used += ((nb_mbufs * (MBUF_SIZE + sizeof(struct rte_mbuf)) + sizeof(struct rte_mempool)));
/* create the mbuf pool */
mp = rte_mempool_create(name, nb_mbufs, MBUF_SIZE, cache_size,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
socket_id, MEMPOOL_F_DMA);
if (mp == NULL)
pktgen_log_panic("Cannot create mbuf pool (%s) port %d, queue %d, nb_mbufs %d, socket_id %d: %s",
name, pid, queue_id, nb_mbufs, socket_id, rte_strerror(errno));
return mp;
}
Example 9: globalinit
static int
globalinit(struct virtif_user *viu)
{
int rv;
if ((rv = rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]),
/*UNCONST*/(void *)(uintptr_t)ealargs)) < 0)
OUT("eal init\n");
/* disable mempool cache due to DPDK bug, not thread safe */
if ((mbpool = rte_mempool_create("mbuf_pool", NMBUF, MBSIZE, 0/*MBCACHE*/,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
rv = -EINVAL;
OUT("mbuf pool\n");
}
if ((rv = PMD_INIT()) < 0)
OUT("pmd init\n");
if ((rv = rte_eal_pci_probe()) < 0)
OUT("PCI probe\n");
if (rte_eth_dev_count() == 0) {
rv = -1;
OUT("no ports\n");
}
rv = 0;
out:
return rv;
}
Example 10: create_mempool
/* Creates mempool for VIRTIO TXQ */
static struct rte_mempool* create_mempool(int core_id, struct virtio_net* dev,
int q_no)
{
unsigned socketid = rte_lcore_to_socket_id(core_id);
struct rte_mempool *pool;
uint32_t mp_size;
char name[32];
/* Create memory pool */
mp_size = VIRTIO_MAX_NB_BUF;
snprintf(name, 32, "virtio_%ld_%d", dev->device_fh, q_no);
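/* If the pool cannot be allocated (ENOMEM), retry with the size halved until it
 * either succeeds or drops below VIRTIO_MIN_NB_BUF. */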
do {
pool = rte_mempool_create(name,
mp_size,
VIRTIO_MBUF_SIZE,
VIRTIO_MP_CACHE_SIZE,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init,
NULL,
rte_pktmbuf_init,
NULL,
socketid,
0);
} while(!pool &&
rte_errno == ENOMEM &&
(mp_size /= 2) >= VIRTIO_MIN_NB_BUF);
return pool;
}
Example 11: udpi_init_mbuf_pools
static void udpi_init_mbuf_pools(void)
{
/* Init the buffer pool */
RTE_LOG(INFO, MEMPOOL, "Creating the mbuf pool ...\n");
udpi.pool = rte_mempool_create("mempool",
udpi.pool_size,
udpi.pool_buffer_size,
udpi.pool_cache_size,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init,
NULL,
rte_pktmbuf_init,
NULL,
rte_socket_id(),
0);
if(NULL == udpi.pool)
{
rte_panic("Cannot create mbuf pool\n");
}
/* Init the indirect buffer pool */
/*RTE_LOG(INFO, MEMPOOL, "Creating the indirect mbuf pool ...\n");
udpi.indirect_pool = rte_mempool_create("indirect mempool",
udpi.pool_size,
sizeof(struct rte_mbuf), udpi.pool_cache_size, 0, NULL, NULL,
rte_pktmbuf_init, NULL, rte_socket_id(), 0);
if(NULL == udpi.indirect_pool)
{
rte_panic("Cannot create indirect mbuf pool\n");
}*/
/* Init the message buffer pool */
RTE_LOG(INFO, MEMPOOL, "Creating the message mbuf pool ...\n");
udpi.msg_pool = rte_mempool_create("msg mempool ",
udpi.msg_pool_size,
udpi.msg_pool_buffer_size,
udpi.msg_pool_cache_size,
0, NULL, NULL,
rte_ctrlmbuf_init, NULL,
rte_socket_id(), 0);
if(NULL == udpi.msg_pool)
{
rte_panic("Cannot create message mbuf pool\n");
}
return;
}
Example 12: main
int main(int argc, char **argv)
{
int rc;
/*
* By default, the SPDK NVMe driver uses DPDK for huge page-based
* memory management and NVMe request buffer pools. Huge pages can
* be either 2MB or 1GB in size (instead of 4KB) and are pinned in
* memory. Pinned memory is important to ensure DMA operations
* never target swapped out memory.
*
* So first we must initialize DPDK. "-c 0x1" indicates to only use
* core 0.
*/
rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]), ealargs);
if (rc < 0) {
fprintf(stderr, "could not initialize dpdk\n");
return 1;
}
/*
* Create the NVMe request buffer pool. This will be used internally
* by the SPDK NVMe driver to allocate an spdk_nvme_request data
* structure for each I/O request. This is implicitly passed to
* the SPDK NVMe driver via an extern declaration in nvme_impl.h.
*/
request_mempool = rte_mempool_create("nvme_request", 8192,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (request_mempool == NULL) {
fprintf(stderr, "could not initialize request mempool\n");
return 1;
}
printf("Initializing NVMe Controllers\n");
/*
* Start the SPDK NVMe enumeration process. probe_cb will be called
* for each NVMe controller found, giving our application a choice on
* whether to attach to each controller. attach_cb will then be
* called for each controller after the SPDK NVMe driver has completed
* initializing the controller we chose to attach.
*/
rc = spdk_nvme_probe(NULL, probe_cb, attach_cb, NULL);
if (rc != 0) {
fprintf(stderr, "spdk_nvme_probe() failed\n");
cleanup();
return 1;
}
printf("Initialization complete.\n");
hello_world();
cleanup();
return 0;
}
Example 13: pg_alloc_mempool
static void pg_alloc_mempool(void)
{
mp = rte_mempool_create("test_mempool", NUM_MBUFS, MBUF_SIZE,
MBUF_CACHE_SIZE,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(), 0);
g_assert(mp);
}
Example 14: main
int main(int argc, char **argv)
{
struct dev *iter;
int rc, i;
printf("NVMe Write/Read with End-to-End data protection test\n");
rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]),
(char **)(void *)(uintptr_t)ealargs);
if (rc < 0) {
fprintf(stderr, "could not initialize dpdk\n");
exit(1);
}
request_mempool = rte_mempool_create("nvme_request", 8192,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (request_mempool == NULL) {
fprintf(stderr, "could not initialize request mempool\n");
exit(1);
}
if (spdk_nvme_probe(NULL, probe_cb, attach_cb, NULL) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
exit(1);
}
rc = 0;
foreach_dev(iter) {
#define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
if (TEST(dp_with_pract_test)
|| TEST(dp_without_pract_extended_lba_test)
|| TEST(dp_without_flags_extended_lba_test)
|| TEST(dp_without_pract_separate_meta_test)
|| TEST(dp_without_pract_separate_meta_apptag_test)
|| TEST(dp_without_flags_separate_meta_test)) {
#undef TEST
rc = 1;
printf("%s: failed End-to-End data protection tests\n", iter->name);
}
}
printf("Cleaning up...\n");
for (i = 0; i < num_devs; i++) {
struct dev *dev = &devs[i];
spdk_nvme_detach(dev->ctrlr);
}
return rc;
}
Example 15: main
/* Main function */
int main(int argc, char **argv)
{
int ret;
int i;
/* Install handlers for SIGINT (Ctrl+C shutdown) and SIGALRM (periodic stats printing) */
signal(SIGINT, sig_handler);
signal(SIGALRM, alarm_routine);
/* Initialize the DPDK environment with the EAL arguments, then shift argc and argv to get the application parameters */
ret = rte_eal_init(argc, argv);
if (ret < 0) FATAL_ERROR("Cannot init EAL\n");
argc -= ret;
argv += ret;
/* Check that exactly 2 lcores are available for this application */
ret = rte_lcore_count ();
if (ret != 2) FATAL_ERROR("This application needs exactly 2 cores.");
/* Parse arguments */
ret = parse_args(argc, argv);
if (ret < 0) FATAL_ERROR("Wrong arguments\n");
/* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */
#if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8
ret = rte_eal_pci_probe();
if (ret < 0) FATAL_ERROR("Cannot probe PCI\n");
#endif
/* Get number of ethernet devices */
nb_sys_ports = rte_eth_dev_count();
if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n");
/* Create a mempool with a per-core cache, initializing every element to be used as an mbuf and allocating it on the current NUMA node */
pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1, MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,rte_socket_id(), 0);
if (pktmbuf_pool == NULL) FATAL_ERROR("Cannot create cluster_mem_pool. Errno: %d [ENOMEM: %d, ENOSPC: %d, E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, EINVAL: %d, EEXIST: %d]\n", rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ, E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST );
/* Create a ring for exchanging packets between cores, and allocating on the current NUMA node */
intermediate_ring = rte_ring_create (RING_NAME, buffer_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ );
if (intermediate_ring == NULL ) FATAL_ERROR("Cannot create ring");
/* Operations needed for each ethernet device */
for(i=0; i < nb_sys_ports; i++)
init_port(i);
/* Start consumer and producer routine on 2 different cores: producer launched first... */
ret = rte_eal_mp_remote_launch (main_loop_producer, NULL, SKIP_MASTER);
if (ret != 0) FATAL_ERROR("Cannot start consumer thread\n");
/* ... and then loop in consumer */
main_loop_consumer ( NULL );
return 0;
}