This page collects typical usage examples of the C++ PMD_INIT_FUNC_TRACE function (a logging macro used throughout DPDK poll-mode drivers). If you have been wondering what PMD_INIT_FUNC_TRACE does, how to call it, or what real-world uses look like, the hand-picked code examples here should help.
Below, 15 PMD_INIT_FUNC_TRACE code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
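Before diving in, a note on what PMD_INIT_FUNC_TRACE actually is: it is not a public DPDK API but a convenience macro that each poll-mode driver defines in its own logging header, typically expanding to a DEBUG-level message carrying the name of the function being entered. The sketch below shows the common pattern; the exact log macro and logtype differ from driver to driver (and some drivers, such as qede in Example 4, take the device pointer so the trace goes through device-aware logging), so treat it as an illustrative assumption rather than the definition used by every example on this page.
#include <rte_log.h>
/* Typical per-driver definition: log ">>" plus the enclosing function name
 * at DEBUG level whenever an init-path function is entered.
 */
#define PMD_INIT_LOG(level, fmt, args...) \
	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
With a definition like this, every PMD_INIT_FUNC_TRACE() call in the examples below simply leaves a breadcrumb in the DPDK log when the driver's init, configure, start, or stop path runs.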
Example 1: cn23xx_vf_setup_iq_regs
static void
cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
{
struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
uint64_t pkt_in_done = 0;
PMD_INIT_FUNC_TRACE();
/* Write the start of the input queue's ring and its size */
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
iq->base_addr_dma);
lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
/* Remember the doorbell & instruction count register addr
* for this queue
*/
iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
CN23XX_SLI_IQ_DOORBELL(iq_no);
iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
/* Store the current instruction counter (used in flush_iq
* calculation)
*/
pkt_in_done = rte_read64(iq->inst_cnt_reg);
/* Clear the count by writing back what we read, but don't
* enable data traffic here
*/
rte_write64(pkt_in_done, iq->inst_cnt_reg);
}
Example 2: cn23xx_vf_setup_global_input_regs
static int
cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
{
uint64_t q_no;
uint64_t d64;
PMD_INIT_FUNC_TRACE();
if (cn23xx_vf_reset_io_queues(lio_dev,
lio_dev->sriov_info.rings_per_vf))
return -1;
for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
0xFFFFFFFF);
d64 = lio_read_csr64(lio_dev,
CN23XX_SLI_IQ_INSTR_COUNT64(q_no));
d64 &= 0xEFFFFFFFFFFFFFFFL;
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
d64);
/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
* the Input Queues
*/
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
CN23XX_PKT_INPUT_CTL_MASK);
}
return 0;
}
Example 3: i40e_pf_host_uninit
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t val;
PMD_INIT_FUNC_TRACE();
/**
* return if SRIOV not enabled, VF number not configured or
* no queue assigned.
*/
if ((!hw->func_caps.sr_iov_1_1) ||
(pf->vf_num == 0) ||
(pf->vf_nb_qps == 0))
return I40E_SUCCESS;
/* free memory to store VF structure */
rte_free(pf->vfs);
pf->vfs = NULL;
/* Disable irq0 for VFR event */
i40e_pf_disable_irq0(hw);
/* Disable VF link status interrupt */
val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
I40E_WRITE_FLUSH(hw);
return I40E_SUCCESS;
}
Example 4: qede_mac_addr_add
static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
uint32_t index, __rte_unused uint32_t pool)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
int rc;
PMD_INIT_FUNC_TRACE(edev);
if (index >= qdev->dev_info.num_mac_addrs) {
DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
index, qdev->dev_info.num_mac_addrs);
return;
}
/* Adding macaddr even though promiscuous mode is set */
if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
/* Add MAC filters according to the unicast secondary macs */
rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
mac_addr->addr_bytes);
if (rc)
DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
}
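A note on Example 4: qede_mac_addr_add is the driver's mac_addr_add callback in its eth_dev_ops table, so an application never calls it directly; it is reached through the generic ethdev API. The fragment below is a minimal application-side sketch under that assumption, using the older struct ether_addr naming that matches this code (later DPDK releases renamed it to rte_ether_addr); add_secondary_mac and the hard-coded port id 0 are illustrative.
#include <rte_ethdev.h>
#include <rte_ether.h>
/* Illustrative helper: add one extra unicast MAC to port 0.
 * rte_eth_dev_mac_addr_add() dispatches to the PMD's mac_addr_add
 * callback -- qede_mac_addr_add() when port 0 is a qede device.
 */
static int add_secondary_mac(void)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	/* pool 0: the default pool; only meaningful with VMDq */
	return rte_eth_dev_mac_addr_add(0, &mac, 0);
}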
Example 5: vmxnet3_dev_clear_queues
void
vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->stopped = TRUE;
vmxnet3_dev_tx_queue_reset(txq);
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
rxq->stopped = TRUE;
vmxnet3_dev_rx_queue_reset(rxq);
}
}
}
Example 6: bnx2x_dev_start
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
PMD_INIT_FUNC_TRACE();
ret = bnx2x_init(sc);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
return -1;
}
if (IS_PF(sc)) {
rte_intr_callback_register(&(dev->pci_dev->intr_handle),
bnx2x_interrupt_handler, (void *)dev);
if (rte_intr_enable(&(dev->pci_dev->intr_handle)))
PMD_DRV_LOG(ERR, "rte_intr_enable failed");
}
ret = bnx2x_dev_rx_init(dev);
if (ret != 0) {
PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
return -3;
}
/* Print important adapter info for the user. */
bnx2x_print_adapter_info(sc);
DELAY_MS(2500);
return ret;
}
Example 7: bnx2x_dev_configure
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
int ret;
PMD_INIT_FUNC_TRACE();
if (dev->data->dev_conf.rxmode.jumbo_frame)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
return -EINVAL;
}
sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
if (sc->num_queues > mp_ncpus) {
PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
return -EINVAL;
}
PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
sc->num_queues, sc->mtu);
/* allocate ilt */
if (bnx2x_alloc_ilt_mem(sc) != 0) {
PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
return -ENXIO;
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
bnx2x_free_ilt_mem(sc);
return -ENXIO;
}
if (IS_VF(sc)) {
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
&sc->vf2pf_mbox_mapping, "vf2pf_mbox",
RTE_CACHE_LINE_SIZE) != 0)
return -ENOMEM;
sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)sc->vf2pf_mbox_mapping.vaddr;
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
&sc->pf2vf_bulletin_mapping, "vf2pf_bull",
RTE_CACHE_LINE_SIZE) != 0)
return -ENOMEM;
sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)sc->pf2vf_bulletin_mapping.vaddr;
ret = bnx2x_vf_get_resources(sc, sc->num_queues, sc->num_queues);
if (ret)
return ret;
}
return 0;
}
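The checks in Example 7 translate into two constraints on the application: it must not ask for more TX queues than RX queues, and the total queue count must not exceed the number of CPUs. The sketch below shows the corresponding application-side call, assuming port 0 and default settings; configure_bnx2x_port and the queue counts are illustrative, not part of the driver.
#include <rte_ethdev.h>
/* Configure port 0 with 4 RX and 4 TX queues: equal counts satisfy the
 * "nb_tx_queues <= nb_rx_queues" check in bnx2x_dev_configure(), and 4
 * queues stays below the CPU count on any reasonable host.
 */
static int configure_bnx2x_port(void)
{
	struct rte_eth_conf port_conf = { 0 };
	/* rte_eth_dev_configure() invokes the PMD's dev_configure callback,
	 * i.e. bnx2x_dev_configure() for this driver.
	 */
	return rte_eth_dev_configure(0, 4, 4, &port_conf);
}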
Example 8: vmxnet3_dev_stop
/*
* Stop device: disable rx and tx functions to allow for reconfiguring.
*/
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct vmxnet3_hw *hw = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
if (hw->adapter_stopped == 1) {
PMD_INIT_LOG(DEBUG, "Device already closed.");
return;
}
/* disable interrupts */
vmxnet3_disable_intr(hw);
/* quiesce the device first */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
/* reset the device */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
PMD_INIT_LOG(DEBUG, "Device reset.");
hw->adapter_stopped = 0;
vmxnet3_dev_clear_queues(dev);
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
vmxnet3_dev_atomic_write_link_status(dev, &link);
}
Example 9: avf_dev_stop
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
struct avf_adapter *adapter =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = dev->intr_handle;
int ret, i;
PMD_INIT_FUNC_TRACE();
if (hw->adapter_stopped == 1)
return;
avf_stop_queues(dev);
/* Disable the interrupt for Rx */
rte_intr_efd_disable(intr_handle);
/* Rx interrupt vector mapping free */
if (intr_handle->intr_vec) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
/* remove all mac addrs */
avf_add_del_all_mac_addr(adapter, FALSE);
hw->adapter_stopped = 1;
}
Example 10: qat_qp_check_queue_alignment
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
uint32_t queue_size_bytes)
{
PMD_INIT_FUNC_TRACE();
if (((queue_size_bytes - 1) & phys_addr) != 0)
return -EINVAL;
return 0;
}
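A note on Example 10: the test relies on queue_size_bytes being a power of two, so that (queue_size_bytes - 1) is a mask of the low address bits; the AND is zero exactly when phys_addr is aligned to the queue size. The self-contained sketch below reproduces the same check with illustrative names (is_aligned is not part of the QAT PMD).
#include <stdint.h>
#include <stdio.h>
/* Same power-of-two alignment test as qat_qp_check_queue_alignment():
 * a queue ring of N bytes must start on an N-byte-aligned physical address.
 */
static int is_aligned(uint64_t phys_addr, uint32_t queue_size_bytes)
{
	return ((queue_size_bytes - 1) & phys_addr) == 0;
}
int main(void)
{
	printf("%d\n", is_aligned(0x100000, 4096)); /* 1: 4 KiB aligned */
	printf("%d\n", is_aligned(0x100800, 4096)); /* 0: misaligned    */
	return 0;
}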
Example 11: bnx2x_dev_allmulticast_disable
static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
bnx2x_set_rx_mode(sc);
}
Example 12: bnx2x_promisc_enable
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
bnx2x_set_rx_mode(sc);
}
Example 13: qat_crypto_sym_configure_session
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
struct qat_session *session = session_private;
int qat_cmd_id;
PMD_INIT_FUNC_TRACE();
/* Get requested QAT command id */
qat_cmd_id = qat_get_cmd_id(xform);
if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
goto error_out;
}
session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
switch (session->qat_cmd) {
case ICP_QAT_FW_LA_CMD_CIPHER:
session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_AUTH:
session = qat_crypto_sym_configure_session_auth(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
session = qat_crypto_sym_configure_session_auth(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
session = qat_crypto_sym_configure_session_auth(dev, xform, session);
session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
case ICP_QAT_FW_LA_CMD_TRNG_TEST:
case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
case ICP_QAT_FW_LA_CMD_MGF1:
case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
case ICP_QAT_FW_LA_CMD_DELIMITER:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
goto error_out;
default:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
goto error_out;
}
return session;
error_out:
rte_mempool_put(internals->sess_mp, session);
return NULL;
}
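A note on Example 13: the xform argument is a linked chain that the application builds before creating the session, and qat_get_cmd_id() maps the chain order onto the QAT command (cipher only, auth only, cipher-then-auth, or auth-then-cipher). The sketch below builds a cipher-then-auth chain (AES-128-CBC plus HMAC-SHA1) of the kind that would take the ICP_QAT_FW_LA_CMD_CIPHER_HASH branch above; it assumes the cryptodev field layout of the DPDK release this code comes from (key and IV handling moved between structures in later releases), and build_cipher_auth_chain is an illustrative helper, not a library function.
#include <stdint.h>
#include <rte_crypto_sym.h>
/* Chain two transforms: encrypt with AES-128-CBC, then generate an
 * HMAC-SHA1 digest. The caller owns the xform and key storage.
 */
static struct rte_crypto_sym_xform *
build_cipher_auth_chain(struct rte_crypto_sym_xform *cipher_xform,
			struct rte_crypto_sym_xform *auth_xform,
			uint8_t *cipher_key, uint8_t *auth_key)
{
	auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform->next = NULL;
	auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform->auth.key.data = auth_key;
	auth_xform->auth.key.length = 20;
	auth_xform->auth.digest_length = 20;
	cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform->next = auth_xform;        /* cipher first, then auth */
	cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform->cipher.key.data = cipher_key;
	cipher_xform->cipher.key.length = 16;
	return cipher_xform;    /* head of the chain passed to the session */
}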
Example 14: cn23xx_vf_reset_io_queues
static int
cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
{
uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
uint64_t d64, q_no;
int ret_val = 0;
PMD_INIT_FUNC_TRACE();
for (q_no = 0; q_no < num_queues; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = lio_read_csr64(lio_dev,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
d64);
}
/* wait until the RST bit is clear or the RST and QUIET bits are set */
for (q_no = 0; q_no < num_queues; q_no++) {
volatile uint64_t reg_val;
reg_val = lio_read_csr64(lio_dev,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
!(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
loop) {
reg_val = lio_read_csr64(
lio_dev,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
loop = loop - 1;
}
if (loop == 0) {
lio_dev_err(lio_dev,
"clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
(unsigned long)q_no);
return -1;
}
reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
reg_val = lio_read_csr64(
lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
lio_dev_err(lio_dev,
"clearing the reset failed for qno: %lu\n",
(unsigned long)q_no);
ret_val = -1;
}
}
return ret_val;
}
Example 15: vmxnet3_dev_close
/*
* Reset and stop device.
*/
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
vmxnet3_dev_stop(dev);
hw->adapter_stopped = 1;
}