本文整理汇总了C++中rte_eal_get_configuration函数的典型用法代码示例。如果您正苦于以下问题:C++ rte_eal_get_configuration函数的具体用法?C++ rte_eal_get_configuration怎么用?C++ rte_eal_get_configuration使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了rte_eal_get_configuration函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: slave_proc_func
/**
 * Real function entrance ran in slave process.
 *
 * Arranges to be signalled if the master process dies, downgrades this
 * process to RTE_PROC_SECONDARY (so it cannot perform primary-only
 * operations such as dev_start/stop), then runs the per-core callback
 * and exits with its return value.  Never returns.
 */
static int
slave_proc_func(void)
{
	struct rte_config *config;
	unsigned slave_id = rte_lcore_id();
	struct lcore_stat *cfg = &core_cfg[slave_id];

	/* Ask the kernel to deliver SIG_PARENT_EXIT when the parent dies */
	if (prctl(PR_SET_PDEATHSIG, SIG_PARENT_EXIT, 0, 0, 0, 0) != 0)
		/* BUG FIX: the two adjacent literals previously concatenated
		 * to "...notified incase..." — a space was missing. */
		printf("Warning: Slave can't register for being notified in "
			"case master process exited\n");
	else {
		struct sigaction act;
		memset(&act, 0, sizeof(act));
		act.sa_handler = sighand_parent_exit;
		if (sigaction(SIG_PARENT_EXIT, &act, NULL) != 0)
			printf("Fail to register signal handler:%d\n",
				SIG_PARENT_EXIT);
	}

	/* Set slave process to SECONDARY to avoid operation like dev_start/stop etc */
	config = rte_eal_get_configuration();
	if (NULL == config)
		printf("Warning:Can't get rte_config\n");
	else
		config->process_type = RTE_PROC_SECONDARY;

	printf("Core %u is ready (pid=%d)\n", slave_id, (int)cfg->pid);
	/* cfg->f/cfg->arg were filled in by the master before fork —
	 * TODO confirm against the spawning code, not visible here. */
	exit(cfg->f(cfg->arg));
}
示例2: rte_memzone_dump
/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
struct rte_mem_config *mcfg;
unsigned i = 0;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
rte_rwlock_read_lock(&mcfg->mlock);
/* dump all zones */
for (i=0; i<RTE_MAX_MEMZONE; i++) {
if (mcfg->memzone[i].addr == NULL)
break;
fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
mcfg->memzone[i].name,
mcfg->memzone[i].phys_addr,
mcfg->memzone[i].len,
mcfg->memzone[i].addr,
mcfg->memzone[i].socket_id,
mcfg->memzone[i].flags);
}
rte_rwlock_read_unlock(&mcfg->mlock);
}
示例3: rte_xen_mem_phy2mch
/**
 * Based on a physical address, calculate the MFN (machine frame number)
 * in a Xen Dom0 environment.
 *
 * @param memseg_id  index of the memseg owning @phy_addr, or -1 to have
 *                   this function search all memsegs for the owner
 * @param phy_addr   (pseudo-)physical address to translate
 * @return the machine address, or RTE_BAD_PHYS_ADDR if no memseg owns
 *         the address
 */
phys_addr_t
rte_xen_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
{
	int mfn_id, i;
	uint64_t mfn, mfn_offset;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg *memseg = mcfg->memseg;

	/* find the memory segment owning the physical address */
	if (memseg_id == -1) {
		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			if ((phy_addr >= memseg[i].phys_addr) &&
				(phy_addr < memseg[i].phys_addr +
					memseg[i].len)) {
				memseg_id = i;
				break;
			}
		}
		if (memseg_id == -1)
			return RTE_BAD_PHYS_ADDR;
	}

	/* index into the per-segment mfn[] table, one entry per 2M chunk */
	mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;

	/* the MFN is contiguous within each 2M chunk, so only the page
	 * offset inside the chunk needs to be added
	 * (NOTE(review): assumes mfn[] entries are in PAGE_SIZE units and
	 * chunks are machine-contiguous — confirm against the Xen mapping
	 * code that fills memseg->mfn[]) */
	mfn_offset = (phy_addr - memseg[memseg_id].phys_addr) %
		RTE_PGSIZE_2M / PAGE_SIZE;
	mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];

	/* return the machine address: frame base plus in-page offset */
	return mfn * PAGE_SIZE + phy_addr % PAGE_SIZE;
}
示例4: rte_eal_memzone_init
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct rte_memseg *layout;

	/* secondary processes share the primary's tables — nothing to do */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	layout = rte_eal_get_physmem_layout();
	if (layout == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n",
			__func__);
		return -1;
	}

	/* wipe the memzone table under the write lock */
	rte_rwlock_write_lock(&mcfg->mlock);
	mcfg->memzone_cnt = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));
	rte_rwlock_write_unlock(&mcfg->mlock);

	return rte_eal_malloc_heap_init();
}
示例5: find_heap_max_free_elem
/* This function will return the greatest free block if a heap has been
 * specified. If no heap has been specified, it will return the heap and
 * length of the greatest free block available in all heaps */
static size_t
find_heap_max_free_elem(int *s, unsigned align)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_malloc_socket_stats stats;
	const int wanted_socket = *s;
	size_t best_len = 0;
	int node;

	for (node = 0; node < RTE_MAX_NUMA_NODES; node++) {
		/* when a specific socket was requested, skip all others */
		if (wanted_socket != SOCKET_ID_ANY && wanted_socket != node)
			continue;

		malloc_heap_get_stats(&mcfg->malloc_heaps[node], &stats);
		if (stats.greatest_free_size > best_len) {
			best_len = stats.greatest_free_size;
			*s = node; /* report which heap won */
		}
	}

	/* the caller gets the usable size: element overhead and the
	 * alignment slack must be subtracted first */
	if (best_len < MALLOC_ELEM_OVERHEAD + align)
		return 0;
	return best_len - MALLOC_ELEM_OVERHEAD - align;
}
示例6: rte_dump_physmem_layout
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
const struct rte_mem_config *mcfg;
unsigned i = 0;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
if (mcfg->memseg[i].addr == NULL)
break;
fprintf(f, "Segment %u: phys:0x%"PRIx64", len:%zu, "
"virt:%p, socket_id:%"PRId32", "
"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
"nrank:%"PRIx32"\n", i,
mcfg->memseg[i].phys_addr,
mcfg->memseg[i].len,
mcfg->memseg[i].addr,
mcfg->memseg[i].socket_id,
mcfg->memseg[i].hugepage_sz,
mcfg->memseg[i].nchannel,
mcfg->memseg[i].nrank);
}
}
示例7: rte_memzone_reserve_bounded
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary).
 * If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len,
	int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *result;

	/* requesting two different hugepage sizes at once is contradictory */
	const int both_2m_1g = (flags & RTE_MEMZONE_1GB) &&
			(flags & RTE_MEMZONE_2MB);
	const int both_16m_16g = (flags & RTE_MEMZONE_16MB) &&
			(flags & RTE_MEMZONE_16GB);
	if (both_2m_1g || both_16m_16g) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* the actual reservation runs under the global memzone write lock */
	mcfg = rte_eal_get_configuration()->mem_config;
	rte_rwlock_write_lock(&mcfg->mlock);
	result = memzone_reserve_aligned_thread_unsafe(name, len, socket_id,
			flags, align, bound);
	rte_rwlock_write_unlock(&mcfg->mlock);

	return result;
}
示例8: rte_memzone_free
/*
 * Release a memzone: clear its table slot under the write lock, then
 * free the backing memory.  Returns 0 on success, -EINVAL if @mz is
 * NULL or does not refer to a live zone.
 */
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	unsigned zone_idx;
	void *zone_addr;
	int status = 0;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	/* recover the table index from the descriptor's byte offset */
	zone_idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone)
			/ sizeof(struct rte_memzone);
	zone_addr = mcfg->memzone[zone_idx].addr;

	if (zone_addr == NULL) {
		status = -EINVAL;
	} else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[zone_idx], 0,
				sizeof(mcfg->memzone[zone_idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	/* rte_free() tolerates NULL, so the -EINVAL path is safe here */
	rte_free(zone_addr);

	return status;
}
示例9: test_memzone_reserve_memory_in_smallest_segment
/*
 * Verify that rte_memzone_reserve() allocates from the smallest free
 * memseg: find the two smallest segments, reserve a cache-line-sized
 * zone, then check that only the smallest segment shrank.
 * Returns 0 on success, -1 on failure.
 */
static int
test_memzone_reserve_memory_in_smallest_segment(void)
{
	const struct rte_memzone *mz;
	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
	size_t min_len, prev_min_len;
	const struct rte_config *config;
	int i;

	config = rte_eal_get_configuration();

	min_ms = NULL; /*< smallest segment */
	prev_min_ms = NULL; /*< second smallest segment */

	/* find two smallest segments */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		ms = &config->mem_config->free_memseg[i];

		/* table is packed: first NULL addr ends the scan */
		if (ms->addr == NULL)
			break;
		/* fully consumed segments don't count */
		if (ms->len == 0)
			continue;

		if (min_ms == NULL)
			min_ms = ms;
		else if (min_ms->len > ms->len) {
			/* set last smallest to second last */
			prev_min_ms = min_ms;

			/* set new smallest */
			min_ms = ms;
		} else if ((prev_min_ms == NULL)
			|| (prev_min_ms->len > ms->len))
			prev_min_ms = ms;
	}

	if (min_ms == NULL || prev_min_ms == NULL) {
		printf("Smallest segments not found!\n");
		return -1;
	}

	/* snapshot both lengths before the reservation */
	min_len = min_ms->len;
	prev_min_len = prev_min_ms->len;

	/* try reserving a memzone in the smallest memseg */
	mz = rte_memzone_reserve("smallest_mz", RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		printf("Failed to reserve memory from smallest memseg!\n");
		return -1;
	}

	/* NOTE(review): with '&&' this only fails when BOTH segments look
	 * wrong; an '||' would catch either one changing unexpectedly.
	 * Left as-is since the exact reservation overhead per segment is
	 * not visible here — confirm against memzone_reserve internals. */
	if (prev_min_ms->len != prev_min_len &&
		min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
		printf("Reserved memory from wrong memseg!\n");
		return -1;
	}

	return 0;
}
示例10: rte_eal_contigmem_attach
/*
 * Attach a secondary process to the contiguous memory already reserved
 * by the primary: re-create the primary's memseg mappings from the
 * shared hugepage_info file and the contigmem device.
 * Returns 0 on success, -1 on failure.
 */
static int
rte_eal_contigmem_attach(void)
{
	const struct hugepage_info *hpi = MAP_FAILED;
	int fd_hugepage_info, fd_hugepage = -1;
	unsigned i;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	/* Obtain a file descriptor for hugepage_info */
	fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage_info < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
		return -1;
	}

	/* Map the shared hugepage_info into the process address space */
	hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
			fd_hugepage_info, 0);
	/* BUG FIX: mmap() signals failure with MAP_FAILED, not NULL —
	 * the previous NULL check could never detect a failed mapping */
	if (hpi == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
		goto error;
	}

	/* Obtain a file descriptor for contiguous memory */
	fd_hugepage = open(hpi->hugedir, O_RDWR);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
		goto error;
	}

	/* Map the contiguous memory into each memory segment, at the
	 * exact virtual address the primary recorded (MAP_FIXED) */
	for (i = 0; i < hpi->num_pages[0]; i++) {
		void *addr;
		struct rte_memseg *seg = &mcfg->memseg[i];

		addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
				MAP_SHARED|MAP_FIXED, fd_hugepage,
				i * PAGE_SIZE);
		if (addr == MAP_FAILED || addr != seg->addr) {
			RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
					i, hpi->hugedir);
			goto error;
		}
	}

	/* hugepage_info is no longer required */
	munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
	close(fd_hugepage_info);
	close(fd_hugepage);
	return 0;

error:
	/* FIX: also unmap hugepage_info on the error path (was leaked) */
	if (hpi != MAP_FAILED)
		munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
	if (fd_hugepage_info >= 0)
		close(fd_hugepage_info);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
示例11: eal_parse_coremask
/*
 * Parse a hexadecimal coremask string ("0x15", "1f", ...) and enable
 * the corresponding lcores in the EAL configuration.  The least
 * significant bit maps to lcore 0; the lowest enabled lcore becomes
 * the master lcore.
 * Returns 0 on success, -1 on any parse error, an undetected lcore,
 * or an empty mask.
 */
static int
eal_parse_coremask(const char *coremask)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	int i, j, idx = 0;
	unsigned count = 0;
	char c;
	int val;

	if (coremask == NULL)
		return -1;
	/* Remove all blank characters ahead and after .
	 * Remove 0x/0X if exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')) )
		coremask += 2;
	/* i becomes the index one past the last non-blank character */
	i = strnlen(coremask, PATH_MAX);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	/* walk the hex digits right-to-left; each digit covers
	 * BITS_PER_HEX consecutive lcore ids starting at idx */
	for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return (-1);
		}
		val = xdigit2val(c);
		for(j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++) {
			if((1 << j) & val) {
				/* bit set but CPU not present on this host */
				if (!lcore_config[idx].detected) {
					RTE_LOG(ERR, EAL, "lcore %u "
					        "unavailable\n", idx);
					return -1;
				}
				cfg->lcore_role[idx] = ROLE_RTE;
				/* first enabled lcore becomes the master */
				if(count == 0)
					cfg->master_lcore = idx;
				count++;
			} else {
				cfg->lcore_role[idx] = ROLE_OFF;
			}
		}
	}
	/* any digits left beyond RTE_MAX_LCORE bits must all be '0',
	 * otherwise the mask names cores that cannot exist */
	for(; i >= 0; i--)
		if(coremask[i] != '0')
			return -1;
	/* mask shorter than RTE_MAX_LCORE: remaining lcores are off */
	for(; idx < RTE_MAX_LCORE; idx++)
		cfg->lcore_role[idx] = ROLE_OFF;
	if(count == 0)
		return -1;
	/* Update the count of enabled logical cores of the EAL configuration */
	cfg->lcore_count = count;
	return 0;
}
示例12: rte_eal_cpu_init
/*
 * Parse /sys/devices/system/cpu to get the number of physical and logical
 * processors on the machine. The function will fill the cpu_info
 * structure.
 *
 * Returns 0.  May call rte_panic() if a detected socket id exceeds
 * RTE_MAX_NUMA_NODES and RTE_EAL_ALLOW_INV_SOCKET_ID is not defined.
 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned lcore_id;
	unsigned count = 0;

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* in 1:1 mapping, record related cpu detected state */
		lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
		if (lcore_config[lcore_id].detected == 0) {
			/* absent CPUs are disabled and excluded from indexing */
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, lcore 1:1 map to cpu id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
		/* NOTE: the unbraced if below binds to exactly one statement
		 * chosen by the preprocessor — either the fixup assignment
		 * or the panic call */
		if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
			/* tolerate out-of-range sockets by pinning to 0 */
			lcore_config[lcore_id].socket_id = 0;
#else
			rte_panic("Socket ID (%u) is greater than "
				"RTE_MAX_NUMA_NODES (%d)\n",
				lcore_config[lcore_id].socket_id,
				RTE_MAX_NUMA_NODES);
#endif
		RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
				"core %u on socket %u\n",
				lcore_id, lcore_config[lcore_id].core_id,
				lcore_config[lcore_id].socket_id);
		count++;
	}
	/* Set the count of enabled logical cores of the EAL configuration */
	config->lcore_count = count;
	RTE_LOG(DEBUG, EAL,
		"Support maximum %u logical core(s) by configuration.\n",
		RTE_MAX_LCORE);
	RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);
	return 0;
}
示例13: rte_malloc_get_socket_stats
/*
 * Function to retrieve data for heap on given socket
 */
int
rte_malloc_get_socket_stats(int socket,
		struct rte_malloc_socket_stats *socket_stats)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	/* reject socket ids outside the configured NUMA range */
	if (socket < 0 || socket >= RTE_MAX_NUMA_NODES)
		return -1;

	return malloc_heap_get_stats(&mcfg->malloc_heaps[socket],
			socket_stats);
}
示例14: rte_eal_memzone_init
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct rte_memseg *layout;
	unsigned seg;

	/* mirror the runtime memsegs from config */
	free_memseg = mcfg->free_memseg;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	layout = rte_eal_get_physmem_layout();
	if (layout == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n",
			__func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* copy each layout entry into any still-empty free_memseg slot
	 * (stops at the first unused layout entry) */
	for (seg = 0; seg < RTE_MAX_MEMSEG; seg++) {
		if (layout[seg].addr == NULL)
			break;
		if (free_memseg[seg].addr != NULL)
			continue;
		memcpy(&free_memseg[seg], &layout[seg],
				sizeof(struct rte_memseg));
	}

	/* make all zones cache-aligned; note this loop terminates on the
	 * first empty free_memseg slot, which may differ from the loop
	 * above, so the two passes must stay separate */
	for (seg = 0; seg < RTE_MAX_MEMSEG; seg++) {
		if (free_memseg[seg].addr == NULL)
			break;
		if (memseg_sanitize(&free_memseg[seg]) < 0) {
			RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n",
				__func__);
			rte_rwlock_write_unlock(&mcfg->mlock);
			return -1;
		}
	}

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return 0;
}
示例15: set_lcore_state
/*
 * Record an lcore's role in all three state stores: the shared
 * (hugepage-backed) config, the process-local lcore_config, and the
 * service-core fast-path flag.
 */
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	struct rte_config *shared_cfg = rte_eal_get_configuration();
	const int is_service = (state == ROLE_SERVICE);

	/* hugepage-backed config, visible to secondary processes */
	shared_cfg->lcore_role[lcore] = state;
	/* process-local mirror */
	lcore_config[lcore].core_role = state;
	/* per-lcore optimized service-core flag */
	lcore_states[lcore].is_service_core = is_service;
}