This article collects typical usage examples of the C++ PMD_DRV_LOG function: what PMD_DRV_LOG does, how to call it, and what it looks like in real driver code. The curated examples below should help answer those questions.
In total, 15 code examples of PMD_DRV_LOG are shown, ordered by popularity by default.
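PMD_DRV_LOG is not a single library function: each DPDK poll-mode driver defines it as a logging macro on top of the rte_log API, so the exact definition differs from driver to driver. The following is only a minimal sketch of the common pattern; the log-type variable foo_logtype_driver is a placeholder, not an identifier taken from the examples below.

#include <rte_log.h>

/* Placeholder per-driver log type; real PMDs register their own. */
extern int foo_logtype_driver;

/* Typical shape of the macro: pass level, format and arguments through to
 * rte_log(), prefixing the calling function name and appending a newline. */
#define PMD_DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, foo_logtype_driver, \
		"%s(): " fmt "\n", __func__, ## args)

With such a definition, a call like PMD_DRV_LOG(ERR, "Invalid VF ID.") expands to an rte_log() invocation at level RTE_LOG_ERR, which is how the examples below use it.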
Example 1: ark_mpu_verify
int
ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
{
uint32_t version;
version = mpu->id.vernum & 0x0000FF00;
if ((mpu->id.idnum != 0x2055504d) ||
(mpu->hw.obj_size != obj_size) ||
(version != 0x00003100)) {
PMD_DRV_LOG(ERR,
" MPU module not found as expected %08x"
" \"%c%c%c%c %c%c%c%c\"\n",
mpu->id.idnum,
mpu->id.id[0], mpu->id.id[1],
mpu->id.id[2], mpu->id.id[3],
mpu->id.ver[0], mpu->id.ver[1],
mpu->id.ver[2], mpu->id.ver[3]);
PMD_DRV_LOG(ERR,
" MPU HW num_queues: %u hw_depth %u,"
" obj_size: %u, obj_per_mrr: %u"
" Expected size %u\n",
mpu->hw.num_queues,
mpu->hw.hw_depth,
mpu->hw.obj_size,
mpu->hw.obj_per_mrr,
obj_size);
return -1;
}
return 0;
}
Example 2: rte_pmd_i40e_get_vf_native_stats
static int
rte_pmd_i40e_get_vf_native_stats(uint16_t port,
uint16_t vf_id,
struct i40e_eth_stats *stats)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
struct i40e_vsi *vsi;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
if (!is_i40e_supported(dev))
return -ENOTSUP;
pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
if (vf_id >= pf->vf_num || !pf->vfs) {
PMD_DRV_LOG(ERR, "Invalid VF ID.");
return -EINVAL;
}
vsi = pf->vfs[vf_id].vsi;
if (!vsi) {
PMD_DRV_LOG(ERR, "Invalid VSI.");
return -EINVAL;
}
i40e_update_vsi_stats(vsi);
memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
return 0;
}
Example 3: bnx2x_dev_start
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
PMD_INIT_FUNC_TRACE();
ret = bnx2x_init(sc);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
return -1;
}
if (IS_PF(sc)) {
rte_intr_callback_register(&(dev->pci_dev->intr_handle),
bnx2x_interrupt_handler, (void *)dev);
if (rte_intr_enable(&(dev->pci_dev->intr_handle)))
PMD_DRV_LOG(ERR, "rte_intr_enable failed");
}
ret = bnx2x_dev_rx_init(dev);
if (ret != 0) {
PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
return -3;
}
/* Print important adapter info for the user. */
bnx2x_print_adapter_info(sc);
DELAY_MS(2500);
return ret;
}
Example 4: bnx2x_dev_configure
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
int ret;
PMD_INIT_FUNC_TRACE();
if (dev->data->dev_conf.rxmode.jumbo_frame)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
return -EINVAL;
}
sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
if (sc->num_queues > mp_ncpus) {
PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
return -EINVAL;
}
PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
sc->num_queues, sc->mtu);
/* allocate ilt */
if (bnx2x_alloc_ilt_mem(sc) != 0) {
PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
return -ENXIO;
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
bnx2x_free_ilt_mem(sc);
return -ENXIO;
}
if (IS_VF(sc)) {
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
&sc->vf2pf_mbox_mapping, "vf2pf_mbox",
RTE_CACHE_LINE_SIZE) != 0)
return -ENOMEM;
sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)sc->vf2pf_mbox_mapping.vaddr;
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
&sc->pf2vf_bulletin_mapping, "vf2pf_bull",
RTE_CACHE_LINE_SIZE) != 0)
return -ENOMEM;
sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)sc->pf2vf_bulletin_mapping.vaddr;
ret = bnx2x_vf_get_resources(sc, sc->num_queues, sc->num_queues);
if (ret)
return ret;
}
return 0;
}
Example 5: avf_start_queues
static int
avf_start_queues(struct rte_eth_dev *dev)
{
struct avf_rx_queue *rxq;
struct avf_tx_queue *txq;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
if (txq->tx_deferred_start)
continue;
if (avf_dev_tx_queue_start(dev, i) != 0) {
PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
return -1;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (rxq->rx_deferred_start)
continue;
if (avf_dev_rx_queue_start(dev, i) != 0) {
PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
return -1;
}
}
return 0;
}
Example 6: i40e_check_fdir_flex_payload
/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
{
struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
uint16_t num, i;
for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
return -EINVAL;
}
}
memset(flex_pit, 0, sizeof(flex_pit));
num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
if (num > I40E_MAX_FLXPLD_FIED) {
PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
return -EINVAL;
}
for (i = 0; i < num; i++) {
if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
flex_pit[i].src_offset & 0x01) {
PMD_DRV_LOG(ERR, "flexpayload should be measured"
" in word");
return -EINVAL;
}
if (i != num - 1)
I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
}
return 0;
}
Example 7: i40e_pf_host_process_cmd_config_vsi_queues
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
uint16_t msglen)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
(struct i40e_virtchnl_vsi_queue_config_info *)msg;
struct i40e_virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
vc_vqci->num_queue_pairs)) {
PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
ret = I40E_ERR_PARAM;
goto send_msg;
}
vc_qpi = vc_vqci->qpair;
for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
ret = I40E_ERR_PARAM;
goto send_msg;
}
/*
* Apply VF RX queue setting to HMC.
* If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
* 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
ret = I40E_ERR_PARAM;
goto send_msg;
}
/* Apply VF TX queue setting to HMC */
if (i40e_pf_host_hmc_config_txq(hw, vf,
&vc_qpi[i].txq) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
ret = I40E_ERR_PARAM;
goto send_msg;
}
}
send_msg:
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
ret, NULL, 0);
return ret;
}
Example 8: build_all_dependencies
int
build_all_dependencies(struct rte_eventdev *dev)
{
int err = 0;
unsigned int i;
struct opdl_evdev *device = opdl_pmd_priv(dev);
uint8_t start_qid = 0;
for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
struct opdl_queue *queue = &device->queue[i];
if (!queue->initialized)
break;
if (queue->q_pos == OPDL_Q_POS_START) {
start_qid = i;
continue;
}
if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
err = opdl_add_deps(device, i, i-1);
if (err < 0) {
PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
"dependency addition for queue:[%u] - FAILED",
dev->data->dev_id,
queue->external_qid);
break;
}
}
if (queue->q_pos == OPDL_Q_POS_END) {
/* Add this dependency */
err = opdl_add_deps(device, i, i-1);
if (err < 0) {
PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
"dependency addition for queue:[%u] - FAILED",
dev->data->dev_id,
queue->external_qid);
break;
}
/* Add dependency for rx on tx */
err = opdl_add_deps(device, start_qid, i);
if (err < 0) {
PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
"dependency addition for queue:[%u] - FAILED",
dev->data->dev_id,
queue->external_qid);
break;
}
}
}
if (!err)
fprintf(stdout, "Success - dependencies built\n");
return err;
}
Example 9: qat_crypto_sym_configure_session
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_pmd_private *internals = dev->data->dev_private;
struct qat_session *session = session_private;
int qat_cmd_id;
PMD_INIT_FUNC_TRACE();
/* Get requested QAT command id */
qat_cmd_id = qat_get_cmd_id(xform);
if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
goto error_out;
}
session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
switch (session->qat_cmd) {
case ICP_QAT_FW_LA_CMD_CIPHER:
session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_AUTH:
session = qat_crypto_sym_configure_session_auth(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
session = qat_crypto_sym_configure_session_auth(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
session = qat_crypto_sym_configure_session_auth(dev, xform, session);
session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
case ICP_QAT_FW_LA_CMD_TRNG_TEST:
case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
case ICP_QAT_FW_LA_CMD_MGF1:
case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
case ICP_QAT_FW_LA_CMD_DELIMITER:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
goto error_out;
default:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
goto error_out;
}
return session;
error_out:
rte_mempool_put(internals->sess_mp, session);
return NULL;
}
Example 10: avf_init_rss
static int
avf_init_rss(struct avf_adapter *adapter)
{
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
struct rte_eth_rss_conf *rss_conf;
uint8_t i, j, nb_q;
int ret;
rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
AVF_MAX_NUM_QUEUES);
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
PMD_DRV_LOG(DEBUG, "RSS is not supported");
return -ENOTSUP;
}
if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
/* set all lut items to default queue */
for (i = 0; i < vf->vf_res->rss_lut_size; i++)
vf->rss_lut[i] = 0;
ret = avf_configure_rss_lut(adapter);
return ret;
}
/* In AVF, RSS enablement is set by PF driver. It is not supported
* to set based on rss_conf->rss_hf.
*/
/* configure RSS key */
if (!rss_conf->rss_key) {
/* Calculate a default hash key; the loop must stay below
 * rss_key_size to avoid writing past the end of rss_key. */
for (i = 0; i < vf->vf_res->rss_key_size; i++)
vf->rss_key[i] = (uint8_t)rte_rand();
} else
rte_memcpy(vf->rss_key, rss_conf->rss_key,
RTE_MIN(rss_conf->rss_key_len,
vf->vf_res->rss_key_size));
/* init RSS LUT table */
for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
if (j >= nb_q)
j = 0;
vf->rss_lut[i] = j;
}
/* send virtchnl ops to configure RSS */
ret = avf_configure_rss_lut(adapter);
if (ret)
return ret;
ret = avf_configure_rss_key(adapter);
if (ret)
return ret;
return 0;
}
Example 11: bnx2x_reg_write32
void
bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
{
if ((offset % 4) != 0) {
PMD_DRV_LOG(DEBUG, "Unaligned 32-bit write to 0x%08lx", offset);
}
PMD_DRV_LOG(DEBUG, "offset=0x%08lx val=0x%08x", offset, val);
*((volatile uint32_t*)((uint64_t)sc->bar[BAR0].base_addr + offset)) = val;
}
Example 12: queue_dma_zone_reserve
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int socket_id)
{
const struct rte_memzone *mz;
unsigned memzone_flags = 0;
const struct rte_memseg *ms;
PMD_INIT_FUNC_TRACE();
mz = rte_memzone_lookup(queue_name);
if (mz != 0) {
if (((size_t)queue_size <= mz->len) &&
((socket_id == SOCKET_ID_ANY) ||
(socket_id == mz->socket_id))) {
PMD_DRV_LOG(DEBUG, "re-use memzone already "
"allocated for %s", queue_name);
return mz;
}
PMD_DRV_LOG(ERR, "Incompatible memzone already "
"allocated %s, size %u, socket %d. "
"Requested size %u, socket %u",
queue_name, (uint32_t)mz->len,
mz->socket_id, queue_size, socket_id);
return NULL;
}
PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
queue_name, queue_size, socket_id);
ms = rte_eal_get_physmem_layout();
switch (ms[0].hugepage_sz) {
case(RTE_PGSIZE_2M):
memzone_flags = RTE_MEMZONE_2MB;
break;
case(RTE_PGSIZE_1G):
memzone_flags = RTE_MEMZONE_1GB;
break;
case(RTE_PGSIZE_16M):
memzone_flags = RTE_MEMZONE_16MB;
break;
case(RTE_PGSIZE_16G):
memzone_flags = RTE_MEMZONE_16GB;
break;
default:
memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
}
#ifdef RTE_LIBRTE_XEN_DOM0
return rte_memzone_reserve_bounded(queue_name, queue_size,
socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
memzone_flags, queue_size);
#endif
}
Example 13: i40e_vf_representor_vlan_offload_set
static int
i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
{
struct i40e_vf_representor *representor = ethdev->data->dev_private;
struct rte_eth_dev *pdev;
struct i40e_pf_vf *vf;
struct i40e_vsi *vsi;
struct i40e_pf *pf;
uint32_t vfid;
pdev = representor->adapter->eth_dev;
vfid = representor->vf_id;
if (!is_i40e_supported(pdev)) {
PMD_DRV_LOG(ERR, "Invalid PF dev.");
return -EINVAL;
}
pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
if (vfid >= pf->vf_num || !pf->vfs) {
PMD_DRV_LOG(ERR, "Invalid VF ID.");
return -EINVAL;
}
vf = &pf->vfs[vfid];
vsi = vf->vsi;
if (!vsi) {
PMD_DRV_LOG(ERR, "Invalid VSI.");
return -EINVAL;
}
if (mask & ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering offload */
if (ethdev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER)
return i40e_vsi_config_vlan_filter(vsi, TRUE);
else
return i40e_vsi_config_vlan_filter(vsi, FALSE);
}
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping offload */
if (ethdev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_STRIP)
return i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
return i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
return -EINVAL;
}
Example 14: i40e_fdir_configure
/*
* Configure flow director related setting
*/
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_fdir_flex_conf *conf;
enum i40e_filter_pctype pctype;
uint32_t val;
uint8_t i;
int ret = 0;
/*
* Configuration needs to be done before
* flow director filters are added.
* If filters exist, flush them.
*/
if (i40e_fdir_empty(hw) < 0) {
ret = i40e_fdir_flush(dev);
if (ret) {
PMD_DRV_LOG(ERR, "failed to flush fdir table.");
return ret;
}
}
/* enable FDIR filter */
val = I40E_READ_REG(hw, I40E_PFQF_CTL_0);
val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);
i40e_init_flx_pld(pf); /* set flex config to default value */
conf = &dev->data->dev_conf.fdir_conf.flex_conf;
ret = i40e_check_fdir_flex_conf(conf);
if (ret < 0) {
PMD_DRV_LOG(ERR, " invalid configuration arguments.");
return -EINVAL;
}
/* configure flex payload */
for (i = 0; i < conf->nb_payloads; i++)
i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
/* configure flex mask */
for (i = 0; i < conf->nb_flexmasks; i++) {
pctype = i40e_flowtype_to_pctype(
conf->flex_mask[i].flow_type);
i40e_set_flex_mask_on_pctype(pf,
pctype,
&conf->flex_mask[i]);
}
return ret;
}
Example 15: bnx2x_reg_read16
uint16_t
bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
{
uint16_t val;
if ((offset % 2) != 0) {
PMD_DRV_LOG(DEBUG, "Unaligned 16-bit read from 0x%08lx", offset);
}
val = (uint16_t)(*((volatile uint16_t*)((uint64_t)sc->bar[BAR0].base_addr + offset)));
PMD_DRV_LOG(DEBUG, "offset=0x%08lx val=0x%08x", offset, val);
return (val);
}