本文整理汇总了C语言（DPDK）中RTE_DIM宏的典型用法代码示例。如果您正苦于以下问题：RTE_DIM宏的具体用法？RTE_DIM怎么用？RTE_DIM使用的例子？那么，这里精选的代码示例或许可以为您提供帮助。
在下文中一共展示了RTE_DIM函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: dump_acl6_rule
/*
 * Log the IPv6 header of a packet that matched an ACL rule, then print
 * the matching rule itself.
 *
 * @param m   mbuf holding the packet (Ethernet header followed by IPv6).
 * @param sig ACL match signature; the rule offset is encoded in the bits
 *            below ACL_DENY_SIGNATURE.
 */
void
dump_acl6_rule(struct rte_mbuf *m, uint32_t sig)
{
	unsigned i;
	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(
		m, unsigned char *)+sizeof(struct ether_hdr));

	/* src_addr/dst_addr are 16-byte arrays; step by 2 to print the
	 * address as 8 colon-separated 16-bit groups. */
	acl_log("Packet Src");
	for (i = 0; i < RTE_DIM(ipv6_hdr->src_addr); i += sizeof(uint16_t))
		acl_log(":%.2x%.2x", ipv6_hdr->src_addr[i],
			ipv6_hdr->src_addr[i + 1]);
	acl_log("\nDst");
	for (i = 0; i < RTE_DIM(ipv6_hdr->dst_addr); i += sizeof(uint16_t))
		acl_log(":%.2x%.2x", ipv6_hdr->dst_addr[i],
			ipv6_hdr->dst_addr[i + 1]);
	/* L4 ports are read immediately past the fixed IPv6 header —
	 * assumes no extension headers; TODO confirm against callers. */
	acl_log("\nSrc port:%hu,Dst port:%hu ",
		rte_bswap16(*(uint16_t *)(ipv6_hdr + 1)),
		rte_bswap16(*((uint16_t *)(ipv6_hdr + 1) + 1)));
	/* Fix: offset is uint32_t, so "%u" is the matching conversion;
	 * the previous "%d" mismatched the argument type. */
	acl_log("hit ACL %u - ", offset);
	print_one_ipv6_rule(acl_config.rule_ipv6 + offset, 1);
	acl_log("\n\n");
}
示例2: bnxt_dev_xstats_get_names_by_id_op
/*
 * Return extended-stat names for the requested ids.
 *
 * When @ids is NULL, all names are returned via the plain names path.
 * Otherwise every id is validated against the total stat count
 * (rx strings + tx strings + 1 for the Tx-drop/anti-spoof counter) and
 * the corresponding name copied into @xstats_names.
 *
 * @return total stat count on success, -1 if any id is out of range.
 */
int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids, unsigned int limit)
{
	/* Account for the Tx drop pkts aka the Anti spoof counter */
	const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
		RTE_DIM(bnxt_tx_stats_strings) + 1;
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
	/* Fix: index was uint16_t while limit is unsigned int; a limit
	 * above UINT16_MAX would make the loop never terminate. */
	unsigned int i;

	if (!ids)
		return bnxt_dev_xstats_get_names_op(dev, xstats_names,
			stat_cnt);

	/* Recurse with ids == NULL to fetch the full table once, then
	 * cherry-pick the requested entries from the local copy. */
	bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL,
		stat_cnt);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			/* Fix: log line was missing its newline. */
			RTE_LOG(ERR, PMD, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name,
			xstats_names_copy[ids[i]].name);
	}
	return stat_cnt;
}
示例3: setup_acl
/*
 * Create and build an ACL classification context for either the IPv4 or
 * the IPv6 field definitions on the given NUMA socket.
 *
 * @param acl_base base of the rule array to install.
 * @param acl_num  number of rules; 0 short-circuits to NULL.
 * @param ipv6     non-zero selects the IPv6 defs/name, else IPv4.
 * @param socketid NUMA socket id for context allocation and naming.
 * @return the built context, or NULL on failure / empty rule set.
 */
static struct rte_acl_ctx *
setup_acl(struct rte_acl_rule *acl_base, unsigned int acl_num, int ipv6,
int socketid)
{
char name[PATH_MAX];
struct rte_acl_param acl_param;
struct rte_acl_config acl_build_param;
struct rte_acl_ctx *context;
int dim = ipv6 ? RTE_DIM(ipv6_defs) : RTE_DIM(ipv4_defs);
/* Per-socket counter so repeated calls produce unique context names. */
static uint32_t ctx_count[NB_SOCKETS] = {0};
if (!acl_num)
return NULL;
/* Create ACL contexts */
snprintf(name, sizeof(name), "%s%d-%d",
ipv6 ? L3FWD_ACL_IPV6_NAME : L3FWD_ACL_IPV4_NAME, socketid, ctx_count[socketid]++);
acl_param.name = name;
acl_param.socket_id = socketid;
acl_param.rule_size = RTE_ACL_RULE_SZ(dim);
acl_param.max_rule_num = MAX_ACL_RULE_NUM;
if ((context = rte_acl_create(&acl_param)) == NULL) {
acl_log("Failed to create ACL context\n");
goto err;
}
/* Optionally force the AVX2 classify method when configured. */
if (acl_parm_config.aclavx2 &&
rte_acl_set_ctx_classify(context, RTE_ACL_CLASSIFY_AVX2) != 0) {
acl_log("Failed to setup classify method for ACL context\n");
goto err;
}
if (rte_acl_add_rules(context, acl_base, acl_num) < 0) {
acl_log("add rules failed\n");
goto err;
}
/* Perform builds */
memset(&acl_build_param, 0, sizeof(acl_build_param));
acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
acl_build_param.num_fields = dim;
memcpy(&acl_build_param.defs, ipv6 ? ipv6_defs : ipv4_defs,
ipv6 ? sizeof(ipv6_defs) : sizeof(ipv4_defs));
if (rte_acl_build(context, &acl_build_param) != 0) {
acl_log("Failed to build ACL trie\n");
goto err;
}
rte_acl_dump(context);
return context;
err:
/* NOTE(review): context may still be NULL here (create failure) —
 * presumably rte_acl_free(NULL) is a no-op; confirm. */
rte_acl_free(context);
return NULL;
}
示例4: rt_init
/*
 * Initialize the IPv4 routing table (LPM) for one socket context.
 *
 * Selects the static route set for endpoint 0 or 1, creates an LPM
 * table on the given socket, and installs every route. Any failure is
 * fatal via rte_exit().
 *
 * @param ctx       socket context to receive the table (must be NULL-free).
 * @param socket_id NUMA socket for the LPM allocation.
 * @param ep        endpoint selector; only 0 or 1 are accepted.
 */
void
rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
{
char name[PATH_MAX];
unsigned i;
int ret;
struct rte_lpm *lpm;
struct ipv4_route *rt;
char a, b, c, d;
unsigned nb_routes;
struct rte_lpm_config conf = { 0 };
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
/* Refuse double initialization of the same socket context. */
if (ctx->rt_ipv4 != NULL)
rte_exit(EXIT_FAILURE, "Routing Table for socket %u already "
"initialized\n", socket_id);
printf("Creating Routing Table (RT) context with %u max routes\n",
RT_IPV4_MAX_RULES);
/* Pick the static route set for the chosen endpoint. */
if (ep == 0) {
rt = rt_ipv4_ep0;
nb_routes = RTE_DIM(rt_ipv4_ep0);
} else if (ep == 1) {
rt = rt_ipv4_ep1;
nb_routes = RTE_DIM(rt_ipv4_ep1);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. Only 0 or 1 "
"supported.\n", ep);
/* create the LPM table */
snprintf(name, sizeof(name), "%s_%u", "rt_ipv4", socket_id);
conf.max_rules = RT_IPV4_MAX_RULES;
conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
lpm = rte_lpm_create(name, socket_id, &conf);
if (lpm == NULL)
rte_exit(EXIT_FAILURE, "Unable to create LPM table "
"on socket %d\n", socket_id);
/* populate the LPM table */
for (i = 0; i < nb_routes; i++) {
ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Unable to add entry num %u to "
"LPM table on socket %d\n", i, socket_id);
uint32_t_to_char(rt[i].ip, &a, &b, &c, &d);
printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n",
a, b, c, d, rt[i].depth, rt[i].if_out);
}
/* Publish the table through the context (opaque rt_ctx handle). */
ctx->rt_ipv4 = (struct rt_ctx *)lpm;
}
示例5: rte_ivshmem_metadata_create
/*
 * Allocate a new ivshmem metadata file identified by @name.
 *
 * Scans the global config table for a free slot under the global
 * spinlock, sets up the file lock parameters, creates the backing
 * metadata, and initializes it (magic number + name).
 *
 * @return 0 on success, -1 on NULL name, table exhaustion, or
 *         metadata creation failure.
 */
int rte_ivshmem_metadata_create(const char *name)
{
struct ivshmem_config * ivshmem_config;
unsigned index;
/* Lazily cache the system page size on first use. */
if (pagesz == 0)
pagesz = getpagesize();
if (name == NULL)
return -1;
rte_spinlock_lock(&global_cfg_sl);
/* Find the first slot with no metadata attached. */
for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
if (ivshmem_global_config[index].metadata == NULL) {
ivshmem_config = &ivshmem_global_config[index];
break;
}
}
/* index == array size means the scan found no free slot. */
if (index == RTE_DIM(ivshmem_global_config)) {
RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
"Maximum has been reached\n");
rte_spinlock_unlock(&global_cfg_sl);
return -1;
}
/* Prepare a whole-region write lock for the metadata file. */
ivshmem_config->lock.l_type = F_WRLCK;
ivshmem_config->lock.l_whence = SEEK_SET;
ivshmem_config->lock.l_start = 0;
ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;
ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
ivshmem_metadata_create(
name,
sizeof(struct rte_ivshmem_metadata),
&ivshmem_config->lock));
if (ivshmem_global_config[index].metadata == NULL) {
rte_spinlock_unlock(&global_cfg_sl);
return -1;
}
/* Metadata setup */
memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
snprintf(ivshmem_config->metadata->name,
sizeof(ivshmem_config->metadata->name), "%s", name);
rte_spinlock_unlock(&global_cfg_sl);
return 0;
}
示例6: fd_reserve
/*
 * Reserve the first free slot in the fd_port table.
 *
 * @return a file-descriptor handle derived from the slot index on
 *         success, -ENOMEM when every slot is already taken.
 */
static int32_t
fd_reserve(void)
{
	uint32_t idx;

	/* Linear scan for the first slot still marked free. */
	for (idx = 0; idx != RTE_DIM(fd_port); idx++) {
		if (fd_port[idx].port == FD_PORT_FREE)
			break;
	}

	if (idx == RTE_DIM(fd_port))
		return -ENOMEM;

	/* Claim the slot before handing out its handle. */
	fd_port[idx].port = FD_PORT_RSRV;
	return IDX_TO_FD(idx);
}
示例7: sa_init
/*
 * Initialize the inbound and outbound IPv4 SA databases for a socket
 * context, populating them from the static entry tables for endpoint
 * 0 or 1. Any failure is fatal via rte_exit().
 *
 * @param ctx       socket context; both SA DB slots must still be NULL.
 * @param socket_id NUMA socket for the SA context allocations.
 * @param ep        endpoint selector; only 0 or 1 are accepted.
 */
void
sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
{
const struct ipsec_sa *sa_out_entries, *sa_in_entries;
unsigned nb_out_entries, nb_in_entries;
const char *name;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
/* Refuse double initialization of either direction. */
if (ctx->sa_ipv4_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
"initialized\n", socket_id);
if (ctx->sa_ipv4_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
"initialized\n", socket_id);
/* Select the static SA tables for the chosen endpoint. */
if (ep == 0) {
sa_out_entries = sa_ep0_out;
nb_out_entries = RTE_DIM(sa_ep0_out);
sa_in_entries = sa_ep0_in;
nb_in_entries = RTE_DIM(sa_ep0_in);
} else if (ep == 1) {
sa_out_entries = sa_ep1_out;
nb_out_entries = RTE_DIM(sa_ep1_out);
sa_in_entries = sa_ep1_in;
nb_in_entries = RTE_DIM(sa_ep1_in);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
name = "sa_ipv4_in";
ctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);
if (ctx->sa_ipv4_in == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
name = "sa_ipv4_out";
ctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);
if (ctx->sa_ipv4_out == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
/* Install the static rules into the freshly created DBs. */
sa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries, nb_in_entries);
sa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries, nb_out_entries);
}
示例8: bnxt_dev_xstats_get_op
/*
 * Fill @xstats with the port's extended statistics: all rx counters,
 * all tx counters, plus one Tx-drop (anti-spoof) counter.
 *
 * Follows the rte_eth xstats convention: if @n is smaller than the
 * required count, the required count is returned and nothing is
 * written; otherwise the number of entries written is returned.
 * Returns 0 when port stats are unsupported (VF).
 */
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
unsigned int count, i;
uint64_t tx_drop_pkts;
if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
return 0;
}
/* Refresh the hardware counters before reading them out.
 * NOTE(review): return values of these two calls are ignored —
 * presumably stale/zero stats are acceptable on failure; confirm. */
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
count = RTE_DIM(bnxt_rx_stats_strings) +
RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */
if (n < count)
return count;
count = 0;
/* Each strings entry carries a byte offset into the stats block. */
for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)rx_stats +
bnxt_rx_stats_strings[i].offset));
count++;
}
for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)tx_stats +
bnxt_tx_stats_strings[i].offset));
count++;
}
/* The Tx drop pkts aka the Anti spoof counter */
xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
return count;
}
示例9: rte_cpu_check_supported
/**
 * Checks if the machine is adequate for running the binary. If it is not, the
 * program exits with status 1.
 */
void
rte_cpu_check_supported(void)
{
	/* This is generated at compile-time by the build system */
	static const enum rte_cpu_flag_t compile_time_flags[] = {
		RTE_COMPILE_TIME_CPUFLAGS
	};
	unsigned idx;

	/* Verify each flag the binary was compiled with is present. */
	for (idx = 0; idx != RTE_DIM(compile_time_flags); idx++) {
		const int ret =
			rte_cpu_get_flag_enabled(compile_time_flags[idx]);

		if (ret < 0) {
			fprintf(stderr,
				"ERROR: CPU feature flag lookup failed with error %d\n",
				ret);
			exit(1);
		}
		if (ret == 0) {
			fprintf(stderr,
				"ERROR: This system does not support \"%s\".\n"
				"Please check that RTE_MACHINE is set correctly.\n",
				rte_cpu_get_flag_name(compile_time_flags[idx]));
			exit(1);
		}
	}
}
示例10: deactivate_slave
/*
 * Remove @port_id from the bond's active-slave list.
 *
 * In 802.3ad mode the LACP machinery is stopped and the slave is
 * deactivated there first; the state machine is restarted afterwards
 * if the device is running.
 */
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
uint8_t slave_pos;
struct bond_dev_private *internals = eth_dev->data->dev_private;
uint8_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD) {
bond_mode_8023ad_stop(eth_dev);
bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
}
slave_pos = find_slave_by_id(internals->active_slaves, active_count,
port_id);
/* If slave was not at the end of the list
 * shift active slaves up active array list */
if (slave_pos < active_count) {
active_count--;
memmove(internals->active_slaves + slave_pos,
internals->active_slaves + slave_pos + 1,
(active_count - slave_pos) *
sizeof(internals->active_slaves[0]));
}
/* Sanity check before publishing the (possibly reduced) count. */
RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
internals->active_slave_count = active_count;
if (eth_dev->data->dev_started && internals->mode == BONDING_MODE_8023AD)
bond_mode_8023ad_start(eth_dev);
}
示例11: parse_args
/*
 * Parse command-line options.
 *
 * Supported options:
 *   -h       print usage and exit successfully.
 *   -i <id>  add a port (string + parsed id) to the global ports table;
 *            exits with failure when the table is full.
 *
 * @return 0 on success (any error path exits the process).
 */
static int
parse_args(int argc, char **argv)
{
int opt;
while ((opt = getopt(argc, argv, "hi:")) != -1) {
switch (opt) {
case 'h':
usage(argv[0]);
rte_exit(EXIT_SUCCESS, "exiting...");
break;
case 'i':
/* Reject more ports than the fixed-size table can hold. */
if (ports.num >= RTE_DIM(ports.p)) {
usage(argv[0]);
rte_exit(EXIT_FAILURE, "configs with %u "
"ports are not supported\n",
ports.num + 1);
}
ports.p[ports.num].str = optarg;
ports.p[ports.num].id = parse_portid(optarg);
ports.num++;
break;
default:
usage(argv[0]);
rte_exit(EXIT_FAILURE, "invalid option: %c\n", opt);
}
}
return 0;
}
示例12: parse_cperf_test_type
/*
 * Parse the crypto-perf test-type argument into opts->test.
 *
 * Maps the string @arg against the known test-type names
 * (throughput / verify / latency).
 *
 * @return 0 on success, -1 when @arg matches no known test type.
 */
static int
parse_cperf_test_type(struct cperf_options *opts, const char *arg)
{
	struct name_id_map cperftest_namemap[] = {
		{
			cperf_test_type_strs[CPERF_TEST_TYPE_THROUGHPUT],
			CPERF_TEST_TYPE_THROUGHPUT
		},
		{
			cperf_test_type_strs[CPERF_TEST_TYPE_VERIFY],
			CPERF_TEST_TYPE_VERIFY
		},
		{
			cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
			CPERF_TEST_TYPE_LATENCY
		}
	};

	int id = get_str_key_id_mapping(
			(struct name_id_map *)cperftest_namemap,
			RTE_DIM(cperftest_namemap), arg);
	if (id < 0) {
		/* Fix: log message was missing its trailing newline. */
		RTE_LOG(ERR, USER1, "failed to parse test type\n");
		return -1;
	}

	opts->test = (enum cperf_perf_test_type)id;

	return 0;
}
示例13: txq_mp2mr_reg
/**
 * Register a Memory Region (MR) <-> Memory Pool (MP) association in
 * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
 *
 * This function should only be called by txq_mp2mr().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 * @param idx
 *   Index of the next available entry.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
uint32_t
txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
{
	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	struct ibv_mr *mr;

	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq_ctrl, mp->name, (void *)mp);
	mr = mlx5_mp2mr(txq_ctrl->priv->pd, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq_ctrl);
		return (uint32_t)-1;
	}
	if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq_ctrl);
		--idx;
		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr));
		/* Shift the table left by one; the freed slot is the last. */
		memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1],
			(sizeof(txq_ctrl->txq.mp2mr) -
			 sizeof(txq_ctrl->txq.mp2mr[0])));
	}
	/* Store the new entry. */
	txq_ctrl->txq.mp2mr[idx].mp = mp;
	txq_ctrl->txq.mp2mr[idx].mr = mr;
	txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
	/* Fix: the "0x" prefix implies hex output, so use PRIx32; the
	 * previous PRIu32 printed the lkey in decimal after "0x". */
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq_ctrl, mp->name, (void *)mp,
	      txq_ctrl->txq.mp2mr[idx].lkey);
	return txq_ctrl->txq.mp2mr[idx].lkey;
}
示例14: vfio_has_supported_extensions
/*
 * Probe the VFIO container for supported IOMMU types.
 *
 * Iterates over the known iommu_types table, querying each via the
 * VFIO_CHECK_EXTENSION ioctl, and logs the result per type.
 *
 * NOTE(review): on any error or when no type is supported, this
 * function closes @vfio_container_fd before returning -1 — callers
 * must not reuse the fd after a failure.
 *
 * @return 0 when at least one IOMMU type is supported, -1 otherwise.
 */
int
vfio_has_supported_extensions(int vfio_container_fd)
{
int ret;
unsigned idx, n_extensions = 0;
for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
const struct vfio_iommu_type *t = &iommu_types[idx];
ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
t->type_id);
if (ret < 0) {
RTE_LOG(ERR, EAL, " could not get IOMMU type, "
"error %i (%s)\n", errno,
strerror(errno));
close(vfio_container_fd);
return -1;
} else if (ret == 1) {
/* we found a supported extension */
n_extensions++;
}
RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
t->type_id, t->name,
ret ? "supported" : "not supported");
}
/* if we didn't find any supported IOMMU types, fail */
if (!n_extensions) {
close(vfio_container_fd);
return -1;
}
return 0;
}
示例15: activate_slave
/*
 * Append @port_id to the bond's active-slave list and run the
 * mode-specific activation hooks (802.3ad / TLB / ALB).
 */
void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
uint8_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD)
bond_mode_8023ad_activate_slave(eth_dev, port_id);
if (internals->mode == BONDING_MODE_TLB
|| internals->mode == BONDING_MODE_ALB) {
internals->tlb_slaves_order[active_count] = port_id;
}
/* NOTE(review): this bound is RTE_DIM(...) - 1, which forbids ever
 * filling the last slot of active_slaves — confirm whether the
 * spare slot is intentional headroom or an off-by-one. */
RTE_VERIFY(internals->active_slave_count <
(RTE_DIM(internals->active_slaves) - 1));
internals->active_slaves[internals->active_slave_count] = port_id;
internals->active_slave_count++;
if (internals->mode == BONDING_MODE_TLB)
bond_tlb_activate_slave(internals);
if (internals->mode == BONDING_MODE_ALB)
bond_mode_alb_client_list_upd(eth_dev);
}