本文整理汇总了C++中rte_panic函数的典型用法代码示例。如果您正苦于以下问题:C++ rte_panic函数的具体用法?C++ rte_panic怎么用?C++ rte_panic使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了rte_panic函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
/*
 * Entry point of the ANS CLI secondary process: initialize the DPDK EAL
 * with a fixed argument vector, attach to the ANS rings, then run an
 * interactive command line until EOF.  Panics on any init failure.
 */
int main(void)
{
	int ret;
	struct cmdline *cl;
	/* argv-style vector handed to the EAL; the trailing NULL terminates
	 * the vector but is not counted in argc. */
	char *param[] = {"anscli",
		"-c",
		"1",
		"-n",
		"1",
		"--no-pci",
		"--socket-mem=1",
		"--proc-type=secondary",
		NULL};
	/* Derive argc from the array itself instead of hard-coding 8, so the
	 * count cannot silently drift when options are added or removed. */
	int param_num = (int)(sizeof(param) / sizeof(param[0])) - 1;

	rte_set_log_level(RTE_LOG_ERR);

	ret = rte_eal_init(param_num, param);
	if (ret < 0)
		rte_panic("Cannot init EAL\n");

	ret = anscli_ring_init();
	if (ret != 0)
		rte_panic("Cannot init ring\n");

	cl = cmdline_stdin_new(ip_main_ctx, "ans> ");
	if (cl == NULL)
		rte_panic("Cannot create ans cmdline instance\n");

	/* Blocks until the user exits the interactive session. */
	cmdline_interact(cl);
	cmdline_stdin_exit(cl);

	return 0;
}
示例2: udpi_init_rings
/*
 * Allocate the per-worker control-message ring table and create one
 * single-producer/single-consumer ring (16 slots) per worker.
 * Panics on allocation or ring-creation failure.
 */
static void udpi_init_rings(void)
{
	uint32_t ring_count = udpi.n_workers;
	uint32_t idx;

	RTE_LOG(INFO, USER1, "Initializing %u SW rings for ctrlmsg\n", ring_count);

	/* Table of ring pointers, cache-line aligned on the local socket. */
	udpi.msg_rings = (struct rte_ring **)rte_malloc_socket(NULL,
			ring_count * sizeof(struct rte_ring *),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (udpi.msg_rings == NULL)
		rte_panic("Cannot allocate memory to store ring pointers\n");

	for (idx = 0; idx < ring_count; idx++) {
		char ring_name[32];
		struct rte_ring *r;

		snprintf(ring_name, sizeof(ring_name), "udpi_ctrlmsg_%u", idx);
		r = rte_ring_create(ring_name, 16, rte_socket_id(),
				RING_F_SC_DEQ | RING_F_SP_ENQ);
		if (r == NULL)
			rte_panic("Cannot create ctrlmsg ring %u\n", idx);
		udpi.msg_rings[idx] = r;
	}
}
示例3: rte_eal_remote_launch
/*
* Send a message to a slave lcore identified by slave_id to call a
* function f with argument arg. Once the execution is done, the
* remote lcore switch in FINISHED state.
*/
int
rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
{
int n;
char c = 0;
int m2s = lcore_config[slave_id].pipe_master2slave[1];
int s2m = lcore_config[slave_id].pipe_slave2master[0];
if (lcore_config[slave_id].state != WAIT)
return -EBUSY;
lcore_config[slave_id].f = f;
lcore_config[slave_id].arg = arg;
/* send message */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
n = write(m2s, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
/* wait ack */
do {
n = read(s2m, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
rte_panic("cannot read on configuration pipe\n");
return 0;
}
示例4: app_init_lpm_tables
/*
 * Create one LPM routing table per in-use NUMA socket, load every
 * configured rule into each table, then point each worker lcore at the
 * table of its local socket.  Panics on create or add failure.
 */
static void
app_init_lpm_tables(void)
{
	unsigned socket_id, lcore_id;

	for (socket_id = 0; socket_id < APP_MAX_SOCKETS; socket_id++) {
		char table_name[32];
		uint32_t r;

		if (app_is_socket_used(socket_id) == 0)
			continue;

		struct rte_lpm_config lpm_config;
		lpm_config.max_rules = APP_MAX_LPM_RULES;
		lpm_config.number_tbl8s = 256;
		lpm_config.flags = 0;

		snprintf(table_name, sizeof(table_name), "lpm_table_%u", socket_id);
		printf("Creating the LPM table for socket %u ...\n", socket_id);
		app.lpm_tables[socket_id] = rte_lpm_create(table_name,
				socket_id, &lpm_config);
		if (app.lpm_tables[socket_id] == NULL)
			rte_panic("Unable to create LPM table on socket %u\n",
					socket_id);

		/* Populate this socket's table with all configured routes. */
		for (r = 0; r < app.n_lpm_rules; r++) {
			int status = rte_lpm_add(app.lpm_tables[socket_id],
					app.lpm_rules[r].ip,
					app.lpm_rules[r].depth,
					app.lpm_rules[r].if_out);
			if (status < 0)
				rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
						(unsigned) r,
						(unsigned) app.lpm_rules[r].ip,
						(unsigned) app.lpm_rules[r].depth,
						(unsigned) app.lpm_rules[r].if_out,
						socket_id,
						status);
		}
	}

	/* Each worker looks up routes in the LPM table local to its socket. */
	for (lcore_id = 0; lcore_id < APP_MAX_LCORES; lcore_id++) {
		if (app.lcore_params[lcore_id].type != e_APP_LCORE_WORKER)
			continue;
		socket_id = rte_lcore_to_socket_id(lcore_id);
		app.lcore_params[lcore_id].worker.lpm_table =
				app.lpm_tables[socket_id];
	}
}
示例5: app_init_ports
/*
 * Configure every NIC port listed in app.ports with a single RX and a
 * single TX queue, enable promiscuous mode, start the port, and finally
 * verify link state.  Panics if any step fails.
 */
static void
app_init_ports(void)
{
	uint32_t idx;

	for (idx = 0; idx < app.n_ports; idx++) {
		uint16_t port_id = app.ports[idx];
		int status;

		RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port_id);

		/* One RX queue, one TX queue. */
		status = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
		if (status < 0)
			rte_panic("Cannot init NIC port %u (%d)\n", port_id, status);
		rte_eth_promiscuous_enable(port_id);

		/* RX queue 0 draws mbufs from the shared app pool, allocated
		 * on the port's own NUMA socket. */
		status = rte_eth_rx_queue_setup(port_id, 0,
				app.port_rx_ring_size,
				rte_eth_dev_socket_id(port_id),
				&rx_conf, app.pool);
		if (status < 0)
			rte_panic("Cannot init RX for port %u (%d)\n",
					(uint32_t) port_id, status);

		status = rte_eth_tx_queue_setup(port_id, 0,
				app.port_tx_ring_size,
				rte_eth_dev_socket_id(port_id),
				&tx_conf);
		if (status < 0)
			rte_panic("Cannot init TX for port %u (%d)\n",
					(uint32_t) port_id, status);

		status = rte_eth_dev_start(port_id);
		if (status < 0)
			rte_panic("Cannot start port %u (%d)\n", port_id, status);
	}

	app_ports_check_link();
}
示例6: app_ping
/*
 * Liveness check: send a PING request to every FC/FW/RT/RX core over its
 * request ring and busy-wait (TSC-bounded) for the response.  Panics if
 * a core fails to answer within APP_PING_TIMEOUT_SEC.
 */
void
app_ping(void)
{
	unsigned lcore;
	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		struct app_core_params *core = &app.cores[lcore];
		struct rte_ring *req_ring, *resp_ring;
		struct app_msg_req *request;
		void *mbuf;
		uint64_t start, elapsed;
		int rc;

		/* Only these core types service the request/response rings. */
		if ((core->core_type != APP_CORE_FC) &&
		    (core->core_type != APP_CORE_FW) &&
		    (core->core_type != APP_CORE_RT) &&
		    (core->core_type != APP_CORE_RX))
			continue;

		req_ring = app_get_ring_req(core->core_id);
		resp_ring = app_get_ring_resp(core->core_id);

		/* Build the PING request inside a control mbuf. */
		mbuf = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (mbuf == NULL)
			rte_panic("Unable to allocate new message\n");
		request = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)mbuf);
		request->type = APP_MSG_REQ_PING;

		/* Retry until the request ring has room. */
		do {
			rc = rte_ring_sp_enqueue(req_ring, mbuf);
		} while (rc == -ENOBUFS);

		/* Spin on the response ring, bounded by the TSC timeout. */
		start = rte_rdtsc();
		do {
			rc = rte_ring_sc_dequeue(resp_ring, &mbuf);
			elapsed = rte_rdtsc() - start;
			if (unlikely(elapsed > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", core->core_id,
					core->core_type);
		} while (rc != 0);

		rte_ctrlmbuf_free(mbuf);
	}
}
示例7: rte_eal_config_create
/* create memory configuration in shared/mmap memory. Take out
 * a write lock on the memsegs, so we can auto-detect primary/secondary.
 * This means we never close the file while running (auto-close on exit).
 * We also don't lock the whole file, so that in future we can use read-locks
 * on other parts, e.g. memzones, to detect if there are running secondary
 * processes. */
static void
rte_eal_config_create(void)
{
void *rte_mem_cfg_addr;
int retval;
const char *pathname = eal_runtime_config_path();
/* --no-shconf mode: nothing is shared, skip creating the mmap'd config. */
if (internal_config.no_shconf)
return;
/* map the config before hugepage address so that we don't waste a page */
if (internal_config.base_virtaddr != 0)
rte_mem_cfg_addr = (void *)
RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
sizeof(struct rte_mem_config), sysconf(_SC_PAGE_SIZE));
else
/* no base address requested: let the kernel choose the mapping */
rte_mem_cfg_addr = NULL;
/* mem_cfg_fd is file-scope state; open the runtime config file once and
 * keep it open for the process lifetime so the fcntl lock below holds. */
if (mem_cfg_fd < 0){
mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
if (mem_cfg_fd < 0)
rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
}
/* size the file to hold exactly one struct rte_mem_config */
retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
if (retval < 0){
close(mem_cfg_fd);
rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
}
/* the write lock both guards the config and detects a concurrent
 * primary process: a second primary's F_SETLK fails here. */
retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
if (retval < 0){
close(mem_cfg_fd);
rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
"process running?\n", pathname);
}
/* shared, writable mapping at the (possibly NULL/hinted) address chosen above */
rte_mem_cfg_addr = mmap(rte_mem_cfg_addr, sizeof(*rte_config.mem_config),
PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
if (rte_mem_cfg_addr == MAP_FAILED){
rte_panic("Cannot mmap memory for rte_config\n");
}
/* seed the shared mapping with the statically allocated early config */
memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
/* store address of the config in the config itself so that secondary
 * processes could later map the config into this exact location */
rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
}
示例8: app_init_nics
/*
 * Version-dependent NIC bring-up shim.  On modern DPDK releases the EAL
 * handles driver init and PCI probing itself, so this compiles to a no-op.
 */
void
app_init_nics(void) {
#ifndef RTE_VERSION_NUM
	/* Very old DPDK (predates RTE_VERSION_NUM): PMD drivers must be
	 * initialized explicitly. */
	printf("Initializing the PMD driver ...\n");
	if (rte_pmd_init_all() < 0)
		rte_panic("Cannot init PMD\n");
#elif RTE_VERSION < RTE_VERSION_NUM(1, 8, 0, 0)
	/* DPDK older than 1.8: PCI devices still need an explicit probe. */
	if (rte_eal_pci_probe() < 0)
		rte_panic("Cannot probe PCI\n");
#endif /* RTE_VERSION_NUM */
}
示例9: interfaceSetup
int32_t interfaceSetup(void)
{
uint8_t portIndex = 0, portCount = rte_eth_dev_count();
int32_t ret = 0, socket_id = -1;
struct rte_eth_link link;
for (portIndex = 0; portIndex < portCount; portIndex++)
{
/* fetch the socket Id to which the port the mapped */
for (ret = 0; ret < GTP_MAX_NUMANODE; ret++)
{
if (numaNodeInfo[ret].intfTotal) {
if (numaNodeInfo[ret].intfAvail & (1 << portIndex)) {
socket_id = ret;
break;
}
}
}
memset(&link, 0x00, sizeof(struct rte_eth_link));
ret = rte_eth_dev_configure(portIndex, 1, 1, &portConf);
if (unlikely(ret < 0))
{
rte_panic("ERROR: Dev Configure\n");
return -1;
}
ret = rte_eth_rx_queue_setup(portIndex, 0, RTE_TEST_RX_DESC_DEFAULT,
0, NULL, numaNodeInfo[socket_id].rx[0]);
if (unlikely(ret < 0))
{
rte_panic("ERROR: Rx Queue Setup\n");
return -2;
}
ret = rte_eth_tx_queue_setup(portIndex, 0, RTE_TEST_TX_DESC_DEFAULT,
0, NULL);
if (unlikely(ret < 0))
{
rte_panic("ERROR: Tx Queue Setup\n");
return -3;
}
rte_eth_promiscuous_enable(portIndex);
rte_eth_dev_start(portIndex);
}
return 0;
}
示例10: app_init_mbuf_pools
/*
 * Create the three application mempools: the packet mbuf pool, the
 * indirect mbuf pool (mbuf headers plus per-packet metadata, no data
 * room), and the control-message pool.  Panics if any creation fails.
 */
static void
app_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_mempool_create(
		"mempool",
		app.pool_size,
		app.pool_buffer_size,
		app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the indirect buffer pool */
	RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
	app.indirect_pool = rte_mempool_create(
		"indirect mempool",
		app.pool_size,
		sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
		app.pool_cache_size,
		0,
		NULL, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.indirect_pool == NULL)
		/* BUGFIX: was a copy-paste of the direct pool's message,
		 * making the two failures indistinguishable in logs. */
		rte_panic("Cannot create indirect mbuf pool\n");

	/* Init the message buffer pool */
	RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
	app.msg_pool = rte_mempool_create(
		"mempool msg",
		app.msg_pool_size,
		app.msg_pool_buffer_size,
		app.msg_pool_cache_size,
		0,
		NULL, NULL,
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.msg_pool == NULL)
		rte_panic("Cannot create message pool\n");
}
示例11: app_init_kni
/*
 * KNI stub used when librte_kni support is compiled out: a configuration
 * with zero KNI queues is tolerated, anything else is a fatal error.
 */
static void
app_init_kni(struct app_params *app) {
	if (app->n_pktq_kni != 0)
		rte_panic("Can not init KNI without librte_kni support.\n");
}
示例12: vmxnet3_tq_tx_complete
/* Reap the TX completion ring: free the mbuf of every packet the device
 * has finished transmitting and advance both the command and completion
 * rings.  Called from the TX path to reclaim descriptors. */
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
int completed = 0;
struct rte_mbuf *mbuf;
vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
(comp_ring->base + comp_ring->next2proc);
/* A completion descriptor is owned by software while its generation bit
 * matches the ring's current generation; the bit flips on ring wrap. */
while (tcd->gen == comp_ring->gen) {
/* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
/* tcd->txdIdx identifies the command-ring slot this completion is for */
mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
if (unlikely(mbuf == NULL))
rte_panic("EOP desc does not point to a valid mbuf");
else
rte_pktmbuf_free(mbuf);
/* drop our reference so the command-ring slot can be reused */
txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
/* Mark the txd for which tcd was generated as completed */
vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
vmxnet3_comp_ring_adv_next2proc(comp_ring);
/* re-read the next completion descriptor after advancing */
tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
comp_ring->next2proc);
completed++;
}
PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
示例13: app_install_coremask
/*
 * Assign physical core IDs from core_mask (lowest set bit first) to every
 * configured application core.  Panics if the mask's core count does not
 * match the configuration.  Returns 0 on success; the -1 paths are
 * unreachable in practice since rte_panic() does not return.
 */
static int
app_install_coremask(uint64_t core_mask)
{
	uint32_t n_cores, i;

	/* Count how many cores the application configuration expects. */
	for (n_cores = 0, i = 0; i < RTE_MAX_LCORE; i++)
		if (app.cores[i].core_type != APP_CORE_NONE)
			n_cores++;

	if (n_cores != app.n_cores) {
		rte_panic("Number of cores in COREMASK should be %u instead "
			"of %u\n", n_cores, app.n_cores);
		return -1;
	}

	/* Hand out mask bits in ascending order to the configured cores. */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		uint32_t core_id;

		if (app.cores[i].core_type == APP_CORE_NONE)
			continue;

		/* BUGFIX: __builtin_ctzll(0) is undefined behavior; fail
		 * loudly if COREMASK has fewer set bits than cores. */
		if (core_mask == 0) {
			rte_panic("COREMASK has fewer bits set than the %u "
				"configured cores\n", app.n_cores);
			return -1;
		}

		core_id = __builtin_ctzll(core_mask);
		core_mask &= ~(1LLU << core_id);
		app.cores[i].core_id = core_id;
	}

	return 0;
}
示例14: rte_realloc
/*
 * Resize allocated memory.  Behaves like realloc(): NULL input allocates,
 * in-place resize is attempted first, otherwise the payload is copied to
 * a new block and the old one freed.  Returns NULL (old block intact) if
 * the new allocation fails.  Panics on heap corruption.
 */
void *
rte_realloc(void *ptr, size_t size, unsigned align)
{
	/* realloc(NULL, ...) degenerates to a plain allocation. */
	if (ptr == NULL)
		return rte_malloc(NULL, size, align);

	struct malloc_elem *elem = malloc_elem_from_data(ptr);
	if (elem == NULL)
		rte_panic("Fatal error: memory corruption detected\n");

	size = CACHE_LINE_ROUNDUP(size);
	align = CACHE_LINE_ROUNDUP(align);

	/* Fast path: alignment already satisfied and the element can be
	 * resized where it sits. */
	if (RTE_PTR_ALIGN(ptr, align) == ptr &&
			malloc_elem_resize(elem, size) == 0)
		return ptr;

	/* Slow path: allocate a fresh block and move the payload. */
	void *moved = rte_malloc(NULL, size, align);
	if (moved == NULL)
		return NULL;

	const unsigned payload = elem->size - MALLOC_ELEM_OVERHEAD;
	rte_memcpy(moved, ptr, payload < size ? payload : size);
	rte_free(ptr);

	return moved;
}
示例15: app_main_loop_worker
/*
 * Worker main loop (no pipeline): round-robin over the RX rings forever,
 * bouncing each dequeued burst to the paired TX ring.
 * NOTE(review): the index masking assumes app.n_ports is a power of two,
 * and the i^1 pairing forwards between adjacent even/odd ports — confirm
 * against the application's port configuration.
 */
void
app_main_loop_worker(void) {
	struct app_mbuf_array *burst;
	uint32_t ring_idx;

	RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
		rte_lcore_id());

	burst = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (burst == NULL)
		rte_panic("Worker thread: cannot allocate buffer space\n");

	for (ring_idx = 0; ; ring_idx = (ring_idx + 1) & (app.n_ports - 1)) {
		int rc;

		/* Nothing available on this ring: move on to the next one. */
		rc = rte_ring_sc_dequeue_bulk(
			app.rings_rx[ring_idx],
			(void **) burst->array,
			app.burst_size_worker_read);
		if (rc == -ENOENT)
			continue;

		/* Busy-wait until the paired TX ring accepts the burst. */
		do {
			rc = rte_ring_sp_enqueue_bulk(
				app.rings_tx[ring_idx ^ 1],
				(void **) burst->array,
				app.burst_size_worker_write);
		} while (rc < 0);
	}
}