This article collects typical usage examples of the rte_zmalloc function (part of DPDK's rte_malloc API, usable from both C and C++ code). If you are unsure what exactly rte_zmalloc does, how to call it, or what real-world uses of it look like, the selected code examples below may help.
The following shows 15 code examples of rte_zmalloc, sorted by popularity by default.
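For orientation before the examples: rte_zmalloc(type, size, align) returns zero-filled memory from DPDK's hugepage-backed heap. The type argument is an optional tag used only for malloc statistics, align is a power-of-two byte alignment (0 selects the default), NULL is returned on failure, and the memory is released with rte_free(). The short sketch below is not taken from any of the projects in the examples; the conn_ctx structure and helpers are invented purely to show the pattern, and it assumes rte_eal_init() has already been called.

#include <stdint.h>
#include <rte_common.h>   /* RTE_CACHE_LINE_SIZE on recent DPDK releases */
#include <rte_malloc.h>   /* rte_zmalloc(), rte_free() */

/* Hypothetical per-connection context, used only for illustration. */
struct conn_ctx {
    uint32_t id;
    uint64_t rx_bytes;
    uint64_t tx_bytes;
};

static struct conn_ctx *
conn_ctx_alloc(uint32_t id)
{
    /* Zeroed, cache-line-aligned allocation; the "conn_ctx" tag only
     * shows up in rte_malloc statistics and heap dumps. */
    struct conn_ctx *ctx = rte_zmalloc("conn_ctx", sizeof(*ctx),
            RTE_CACHE_LINE_SIZE);

    if (ctx == NULL)
        return NULL;    /* allocation failure must be handled by the caller */

    ctx->id = id;       /* all other fields are already zero */
    return ctx;
}

static void
conn_ctx_free(struct conn_ctx *ctx)
{
    rte_free(ctx);      /* rte_free(NULL) is a no-op, like free(NULL) */
}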
Example 1: vhost_scsi_bdev_construct
static struct vhost_block_dev *
vhost_scsi_bdev_construct(const char *bdev_name, const char *bdev_serial,
        uint32_t blk_size, uint64_t blk_cnt,
        bool wce_enable)
{
    struct vhost_block_dev *bdev;

    bdev = rte_zmalloc(NULL, sizeof(*bdev), RTE_CACHE_LINE_SIZE);
    if (!bdev)
        return NULL;

    strncpy(bdev->name, bdev_name, sizeof(bdev->name));
    strncpy(bdev->product_name, bdev_serial, sizeof(bdev->product_name));
    bdev->blocklen = blk_size;
    bdev->blockcnt = blk_cnt;
    bdev->write_cache = wce_enable;

    /* use memory as disk storage space */
    bdev->data = rte_zmalloc(NULL, blk_cnt * blk_size, 0);
    if (!bdev->data) {
        fprintf(stderr, "not enough reserved huge memory for disk\n");
        rte_free(bdev);
        return NULL;
    }

    return bdev;
}
Example 2: dp_without_flags_separate_meta_test
/*
 * LBA + metadata without the data protection bits set,
 * using a separate metadata payload for the test case.
 */
static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
        uint32_t *io_flags)
{
    uint32_t md_size, sector_size;

    req->lba_count = 16;

    /* separate metadata payload for the test case */
    if (spdk_nvme_ns_supports_extended_lba(ns))
        return 0;

    sector_size = spdk_nvme_ns_get_sector_size(ns);
    md_size = spdk_nvme_ns_get_md_size(ns);

    req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
    if (!req->contig)
        return 0;

    req->metadata = rte_zmalloc(NULL, md_size * req->lba_count, 0x1000);
    if (!req->metadata) {
        rte_free(req->contig);
        return 0;
    }

    req->lba = 0x600000;
    req->use_extended_lba = false;
    *io_flags = 0;

    return req->lba_count;
}
Example 3: run_single_tbl_perf_test
/*
 * Do a single performance test, of one type of operation.
 *
 * @param h
 *   hash table to run test on
 * @param func
 *   function to call (add, delete or lookup function)
 * @param params
 *   parameters of the test (key length, number of entries, iterations, ...)
 * @param avg_occupancy
 *   The average number of entries in each bucket of the hash table
 * @param invalid_pos_count
 *   The number of failed operations (e.g. due to a full bucket).
 * @return
 *   The average number of ticks per hash function call. A negative number
 *   signifies failure.
 */
static double
run_single_tbl_perf_test(const struct rte_hash *h, hash_operation func,
        const struct tbl_perf_test_params *params, double *avg_occupancy,
        uint32_t *invalid_pos_count)
{
    uint64_t begin, end, ticks = 0;
    uint8_t *key = NULL;
    uint32_t *bucket_occupancies = NULL;
    uint32_t num_buckets, i, j;
    int32_t pos;

    /* Initialise */
    num_buckets = params->entries / params->bucket_entries;
    key = (uint8_t *) rte_zmalloc("hash key",
            params->key_len * sizeof(uint8_t), 16);
    if (key == NULL)
        return -1;

    bucket_occupancies = (uint32_t *) rte_zmalloc("bucket occupancies",
            num_buckets * sizeof(uint32_t), 16);
    if (bucket_occupancies == NULL) {
        rte_free(key);
        return -1;
    }

    ticks = 0;
    *invalid_pos_count = 0;

    for (i = 0; i < params->num_iterations; i++) {
        /* Prepare inputs for the current iteration */
        for (j = 0; j < params->key_len; j++)
            key[j] = (uint8_t) rte_rand();

        /* Perform operation, and measure time it takes */
        begin = rte_rdtsc();
        pos = func(h, key);
        end = rte_rdtsc();
        ticks += end - begin;

        /* Other work per iteration */
        if (pos < 0)
            *invalid_pos_count += 1;
        else
            bucket_occupancies[pos / params->bucket_entries]++;
    }

    *avg_occupancy = get_avg(bucket_occupancies, num_buckets);

    rte_free(bucket_occupancies);
    rte_free(key);
    return (double)ticks / params->num_iterations;
}
Example 4: app_pipeline_fa_init
static void *
app_pipeline_fa_init(struct pipeline_params *params,
        __rte_unused void *arg)
{
    struct app_pipeline_fa *p;
    uint32_t size, i;

    /* Check input arguments */
    if ((params == NULL) ||
        (params->n_ports_in == 0) ||
        (params->n_ports_out == 0))
        return NULL;

    /* Memory allocation */
    size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fa));
    p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
    if (p == NULL)
        return NULL;

    /* Initialization */
    p->n_ports_in = params->n_ports_in;
    p->n_ports_out = params->n_ports_out;

    if (pipeline_fa_parse_args(&p->params, params)) {
        rte_free(p);
        return NULL;
    }

    /* Memory allocation */
    size = RTE_CACHE_LINE_ROUNDUP(
        p->params.n_flows * sizeof(struct app_pipeline_fa_flow));
    p->flows = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
    if (p->flows == NULL) {
        rte_free(p);
        return NULL;
    }

    /* Initialization of flow table */
    for (i = 0; i < p->params.n_flows; i++)
        pipeline_fa_flow_params_set_default(&p->flows[i].params);

    /* Initialization of DSCP table */
    for (i = 0; i < RTE_DIM(p->dscp); i++) {
        p->dscp[i].traffic_class = 0;
        p->dscp[i].color = e_RTE_METER_GREEN;
    }

    return (void *) p;
}
Example 5: fs_sub_device_alloc
static int
fs_sub_device_alloc(struct rte_eth_dev *dev,
        const char *params)
{
    uint8_t nb_subs;
    int ret;
    int i;

    ret = failsafe_args_count_subdevice(dev, params);
    if (ret)
        return ret;

    if (PRIV(dev)->subs_tail > FAILSAFE_MAX_ETHPORTS) {
        ERROR("Cannot allocate more than %d ports",
              FAILSAFE_MAX_ETHPORTS);
        return -ENOSPC;
    }

    nb_subs = PRIV(dev)->subs_tail;
    PRIV(dev)->subs = rte_zmalloc(NULL,
            sizeof(struct sub_device) * nb_subs,
            RTE_CACHE_LINE_SIZE);
    if (PRIV(dev)->subs == NULL) {
        ERROR("Could not allocate sub_devices");
        return -ENOMEM;
    }

    /* Initiate static sub devices linked list. */
    for (i = 1; i < nb_subs; i++)
        PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs + i;
    PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs;

    return 0;
}
Example 6: vhost_new_device
/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
    struct virtio_net *dev;
    int i;

    dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
    if (dev == NULL) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "Failed to allocate memory for new dev.\n");
        return -1;
    }

    for (i = 0; i < MAX_VHOST_DEVICE; i++) {
        if (vhost_devices[i] == NULL)
            break;
    }
    if (i == MAX_VHOST_DEVICE) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "Failed to find a free slot for new device.\n");
        rte_free(dev);
        return -1;
    }

    vhost_devices[i] = dev;
    dev->vid = i;
    dev->slave_req_fd = -1;

    return i;
}
Example 7: trace_init_component
/*****************************************************************************
 * trace_init_component()
 ****************************************************************************/
static int trace_init_component(uint32_t trace_id)
{
    trace_comp_t *tc;
    uint32_t i;

    if (trace_id >= TRACE_MAX)
        return -EINVAL;

    tc = &trace_components[trace_id];
    tc->tc_comp_id = trace_id;

    /* To be set later if needed (through an API). */
    tc->tc_fmt = NULL;

    tc->tc_buffers = rte_zmalloc("trace_buffer",
            rte_lcore_count() * sizeof(*tc->tc_buffers),
            0);
    if (!tc->tc_buffers)
        return -ENOMEM;

    for (i = 0; i < rte_lcore_count(); i++)
        TRACE_BUF_SET_LEVEL(&tc->tc_buffers[i], TRACE_LVL_LOG);

    return 0;
}
Example 8: rte_vdpa_register_device
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
        struct rte_vdpa_dev_ops *ops)
{
    struct rte_vdpa_device *dev;
    char device_name[MAX_VDPA_NAME_LEN];
    int i;

    if (vdpa_device_num >= MAX_VHOST_DEVICE)
        return -1;

    for (i = 0; i < MAX_VHOST_DEVICE; i++) {
        dev = vdpa_devices[i];
        if (dev && is_same_vdpa_device(&dev->addr, addr))
            return -1;
    }

    for (i = 0; i < MAX_VHOST_DEVICE; i++) {
        if (vdpa_devices[i] == NULL)
            break;
    }

    sprintf(device_name, "vdpa-dev-%d", i);
    dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
            RTE_CACHE_LINE_SIZE);
    if (!dev)
        return -1;

    memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
    dev->ops = ops;
    vdpa_devices[i] = dev;
    vdpa_device_num++;

    return i;
}
Example 9: pipeline_init
static void *
pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
{
    struct app_params *app = (struct app_params *) arg;
    struct pipeline_master *p;
    uint32_t size;

    /* Check input arguments */
    if (app == NULL)
        return NULL;

    /* Memory allocation */
    size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_master));
    p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
    if (p == NULL)
        return NULL;

    /* Initialization */
    p->app = app;

    p->cl = cmdline_stdin_new(app->cmds, "pipeline> ");
    if (p->cl == NULL) {
        rte_free(p);
        return NULL;
    }

    p->script_file_done = 0;
    if (app->script_file == NULL)
        p->script_file_done = 1;

    return (void *) p;
}
Example 10: app_pipeline_fc_init
static void *
app_pipeline_fc_init(struct pipeline_params *params,
        __rte_unused void *arg)
{
    struct app_pipeline_fc *p;
    uint32_t size, i;

    /* Check input arguments */
    if ((params == NULL) ||
        (params->n_ports_in == 0) ||
        (params->n_ports_out == 0))
        return NULL;

    /* Memory allocation */
    size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fc));
    p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
    if (p == NULL)
        return NULL;

    /* Initialization */
    p->n_ports_in = params->n_ports_in;
    p->n_ports_out = params->n_ports_out;

    for (i = 0; i < N_BUCKETS; i++)
        TAILQ_INIT(&p->flows[i]);
    p->n_flows = 0;

    return (void *) p;
}
Example 11: vhost_new_device
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
int
vhost_new_device(struct vhost_device_ctx ctx)
{
    struct virtio_net *dev;
    int i;

    dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
    if (dev == NULL) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to allocate memory for dev.\n",
            ctx.fh);
        return -1;
    }

    for (i = 0; i < MAX_VHOST_DEVICE; i++) {
        if (vhost_devices[i] == NULL)
            break;
    }
    if (i == MAX_VHOST_DEVICE) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "Failed to find a free slot for new device.\n");
        rte_free(dev);
        return -1;
    }

    vhost_devices[i] = dev;
    dev->device_fh = i;

    return i;
}
Example 12: dp_with_pract_test
/*
 * No protection information, with PRACT set to 1; both the extended LBA
 * format and the separate-metadata layout can run this test case.
 */
static uint32_t dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *req,
        uint32_t *io_flags)
{
    uint32_t sector_size;

    req->lba_count = 8;

    sector_size = spdk_nvme_ns_get_sector_size(ns);
    /* No additional metadata buffer provided */
    req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
    if (!req->contig)
        return 0;

    switch (spdk_nvme_ns_get_pi_type(ns)) {
    case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
        *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRACT;
        break;
    case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
    case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
        *io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
                SPDK_NVME_IO_FLAGS_PRACT;
        break;
    default:
        *io_flags = 0;
        break;
    }

    req->lba = 0x100000;
    req->use_extended_lba = false;
    req->metadata = NULL;

    return req->lba_count;
}
Example 13: ns_attach
static void
ns_attach(struct dev *device, int attachment_op, int ctrlr_id, int ns_id)
{
    int ret = 0;
    struct spdk_nvme_ctrlr_list *ctrlr_list;

    ctrlr_list = rte_zmalloc("nvme controller list", sizeof(struct spdk_nvme_ctrlr_list),
            4096);
    if (ctrlr_list == NULL) {
        printf("Allocation error (controller list)\n");
        exit(1);
    }

    ctrlr_list->ctrlr_count = 1;
    ctrlr_list->ctrlr_list[0] = ctrlr_id;

    if (attachment_op == SPDK_NVME_NS_CTRLR_ATTACH) {
        ret = spdk_nvme_ctrlr_attach_ns(device->ctrlr, ns_id, ctrlr_list);
    } else if (attachment_op == SPDK_NVME_NS_CTRLR_DETACH) {
        ret = spdk_nvme_ctrlr_detach_ns(device->ctrlr, ns_id, ctrlr_list);
    }

    if (ret) {
        fprintf(stdout, "ns attach: Failed\n");
    }

    rte_free(ctrlr_list);
}
Example 14: mg_distribute_register_output
// ATTENTION: Queue size must be at least one!!
int mg_distribute_register_output(
    struct mg_distribute_config *cfg,
    uint16_t number,
    uint8_t port_id,
    uint16_t queue_id,
    uint16_t burst_size,
    uint64_t timeout)
{
    if (number >= cfg->nr_outputs) {
        printf("ERROR: invalid output number\n");
        return -EINVAL;
    }
    cfg->outputs[number].port_id = port_id;
    cfg->outputs[number].queue_id = queue_id;
    cfg->outputs[number].timeout = timeout;
    cfg->outputs[number].valid = 1;

    if (burst_size != 0) {
        // Allocate a queue for the output, together with its trailing
        // array of mbuf pointers, in one cache-line-aligned allocation.
        struct mg_distribute_queue *queue = rte_zmalloc(NULL,
                sizeof(struct mg_distribute_queue)
                + burst_size * sizeof(struct rte_mbuf *),
                RTE_CACHE_LINE_SIZE);
        if (queue == NULL)
            return -ENOMEM;
        cfg->outputs[number].queue = queue;
        cfg->outputs[number].queue->size = burst_size;
    }
    return 0;
}
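A note on the allocation in Example 14: the queue header and its trailing array of rte_mbuf pointers come from a single rte_zmalloc() call, so one rte_free() releases both. The teardown helper below is not part of the original module; it is a hypothetical counterpart, reusing the mg_distribute_config and mg_distribute_queue types from the example above.

static void
mg_distribute_unregister_output(struct mg_distribute_config *cfg, uint16_t number)
{
    if (number >= cfg->nr_outputs)
        return;
    /* One rte_free() covers the queue struct and the mbuf pointer array
     * allocated together with it; rte_free(NULL) is safe. */
    rte_free(cfg->outputs[number].queue);
    cfg->outputs[number].queue = NULL;
    cfg->outputs[number].valid = 0;
}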
Example 15: register_ctrlr
static void
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
    int nsid, num_ns;
    struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
    const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);

    if (entry == NULL) {
        perror("ctrlr_entry malloc");
        exit(1);
    }

    entry->latency_page = rte_zmalloc("nvme latency", sizeof(struct spdk_nvme_intel_rw_latency_page),
            4096);
    if (entry->latency_page == NULL) {
        printf("Allocation error (latency page)\n");
        exit(1);
    }

    snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);

    entry->ctrlr = ctrlr;
    entry->next = g_controllers;
    g_controllers = entry;

    if (g_latency_tracking_enable &&
        spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
        set_latency_tracking_feature(ctrlr, true);

    num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
    for (nsid = 1; nsid <= num_ns; nsid++) {
        register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, nsid));
    }
}
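Example 15 mixes allocators deliberately: the bookkeeping ctrlr_entry lives on the regular C heap via malloc(), while the latency page comes from the DPDK heap via rte_zmalloc() with 4 KiB alignment, presumably because the NVMe controller DMAs the log page into that buffer. The sketch below is a hypothetical teardown, not part of the original program; it only illustrates pairing each allocation with its matching free function, reusing the ctrlr_entry type and g_controllers list head from the example.

static void
unregister_ctrlrs(void)
{
    struct ctrlr_entry *entry = g_controllers;

    while (entry != NULL) {
        struct ctrlr_entry *next = entry->next;

        rte_free(entry->latency_page);  /* allocated with rte_zmalloc() */
        free(entry);                    /* allocated with malloc() */
        entry = next;
    }
    g_controllers = NULL;
}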