

C++ per_cpu_ptr Function Code Examples

This article collects typical usage examples of the per_cpu_ptr function, drawn from the Linux kernel and related open-source projects. If you are wondering what per_cpu_ptr does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.


The following presents 15 code examples of the per_cpu_ptr function, sorted by popularity by default.
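Before turning to the examples, here is a minimal sketch written for this article (not taken from any of the projects below; the struct and function names are invented for illustration, and synchronization details such as u64_stats_sync are omitted) of the basic pattern most of them build on: allocate a per-CPU object with alloc_percpu(), update the current CPU's copy, and fold all copies into one total with per_cpu_ptr() inside for_each_possible_cpu().

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical per-CPU counters, for illustration only. */
struct pkt_stats {
	u64 packets;
	u64 bytes;
};

static struct pkt_stats __percpu *stats;

static int stats_init(void)
{
	stats = alloc_percpu(struct pkt_stats);
	return stats ? 0 : -ENOMEM;
}

/* Update the copy that belongs to the current CPU. */
static void stats_account(unsigned int len)
{
	struct pkt_stats *s = get_cpu_ptr(stats);	/* disables preemption */

	s->packets++;
	s->bytes += len;
	put_cpu_ptr(stats);
}

/* Fold every CPU's copy into one total, as several examples below do. */
static u64 stats_total_packets(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->packets;

	return total;
}

static void stats_exit(void)
{
	free_percpu(stats);
}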

Example 1: nicvf_get_ethtool_stats

static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stat, tmp_stats;
	int sqs, cpu;

	nicvf_update_stats(nic);

	/* Update LMAC stats */
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->hw_stats)
				[nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
		tmp_stats = 0;
		for_each_possible_cpu(cpu)
			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
				     [nicvf_drv_stats[stat].index];
		*(data++) = tmp_stats;
	}

	nicvf_get_qset_stats(nic, stats, &data);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.rx_stats[stat];
	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.tx_stats[stat];
}
Developer: forgivemyheart, Project: linux, Lines of code: 36, Source file: nicvf_ethtool.c

Example 2: ovs_dp_upcall

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
	      const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->pid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
Developer: daveti, Project: prov-kernel, Lines of code: 36, Source file: datapath.c

Example 3: probe_mt65xx_mon_tracepoint

static void
probe_mt65xx_mon_tracepoint(void *ignore, struct task_struct *prev,
                            struct task_struct *next)
{
    struct trace_array_cpu *data;
    unsigned long flags;
    int cpu;
    int pc;

    if (unlikely(!mt65xx_mon_ref))
        return;

    if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
        return;

    if(prev)
        tracing_record_cmdline(prev);
    if(next)
        tracing_record_cmdline(next);
    tracing_record_cmdline(current);

    pc = preempt_count();
    //local_irq_save(flags);
    spin_lock_irqsave(&mt65xx_mon_spinlock, flags);
    cpu = raw_smp_processor_id();
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
    data = mt65xx_mon_trace->data[cpu];
#else
    data = per_cpu_ptr(mt65xx_mon_trace->trace_buffer.data, cpu);
#endif

    if (likely(!atomic_read(&data->disabled)))
        tracing_mt65xx_mon_function(mt65xx_mon_trace, prev, next, flags, pc);
    spin_unlock_irqrestore(&mt65xx_mon_spinlock, flags);
    //local_irq_restore(flags);
}
Developer: Scorpio92, Project: mediatek, Lines of code: 36, Source file: trace_mt65xx_mon.c

Example 4: thread_group_cputime

/**
 * thread_group_cputime - Sum the thread group time fields across all CPUs.
 *
 * @tsk:	The task we use to identify the thread group.
 * @times:	task_cputime structure in which we return the summed fields.
 *
 * Walk the list of CPUs to sum the per-CPU time fields in the thread group
 * time structure.
 */
void thread_group_cputime(
	struct task_struct *tsk,
	struct task_cputime *times)
{
	struct signal_struct *sig;
	int i;
	struct task_cputime *tot;

	sig = tsk->signal;
	if (unlikely(!sig) || !sig->cputime.totals) {
		times->utime = tsk->utime;
		times->stime = tsk->stime;
		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
		return;
	}
	times->stime = times->utime = cputime_zero;
	times->sum_exec_runtime = 0;
	for_each_possible_cpu(i) {
		tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
		times->utime = cputime_add(times->utime, tot->utime);
		times->stime = cputime_add(times->stime, tot->stime);
		times->sum_exec_runtime += tot->sum_exec_runtime;
	}
}
Developer: traveller42, Project: linux-2.6.28.mx233-falconwing, Lines of code: 33, Source file: posix-cpu-timers.c

Example 5: cpumask_weight

static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}
Developer: romanbb, Project: android_kernel_lge_d851, Lines of code: 36, Source file: padata.c

Example 6: minit

static int minit(void)
{
	int cpu;
	unsigned long *this;
	unsigned long file_size = (10UL * 1024 * 1024 * 1024);
	unsigned long chunk_num = file_size / CHUNKSIZE;
	unsigned long bytes_num = chunk_num / sizeof(int);

	printk("Start %s.\n", THIS_MODULE->name);
	
	percpu_ptr = alloc_percpu(unsigned long);
	if (!percpu_ptr)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		/* allocate a buffer for every per-CPU value */
		this = vmalloc(bytes_num);
		if (!this) {
			printk(KERN_ERR "alloc bitmap failed.\n");
			return -ENOMEM;
		}
		/* store the buffer pointer in this CPU's per-CPU slot */
		*per_cpu_ptr(percpu_ptr, cpu) = (unsigned long)this;
	}

	return 0;
}
Developer: imflyfish, Project: kernel_test, Lines of code: 24, Source file: testper.c
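The init routine above allocates a buffer for every online CPU, but the matching cleanup is not part of the snippet. Assuming minit() stores each vmalloc() buffer into its per-CPU slot (as in the corrected loop above), a possible module exit path might look like this sketch (mexit is a hypothetical name):

static void mexit(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* free the buffer stored in this CPU's slot; vfree(NULL) is a no-op */
		vfree((void *)*per_cpu_ptr(percpu_ptr, cpu));
		*per_cpu_ptr(percpu_ptr, cpu) = 0;
	}

	free_percpu(percpu_ptr);
	printk("Exit %s.\n", THIS_MODULE->name);
}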

Example 7: dev_txq_stats_fold

static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;

	dev_txq_stats_fold(dev, stats);

	if (vlan_dev_info(dev)->vlan_rx_stats) {
		struct vlan_rx_stats *p, rx = {0};
		int i;

		for_each_possible_cpu(i) {
			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
			rx.rx_packets += p->rx_packets;
			rx.rx_bytes   += p->rx_bytes;
			rx.rx_errors  += p->rx_errors;
			rx.multicast  += p->multicast;
		}
		stats->rx_packets = rx.rx_packets;
		stats->rx_bytes   = rx.rx_bytes;
		stats->rx_errors  = rx.rx_errors;
		stats->multicast  = rx.multicast;
	}
	return stats;
}
Developer: mfleming, Project: linux-2.6, Lines of code: 24, Source file: vlan_dev.c

Example 8: for_each_possible_cpu

static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{

	if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
		struct vlan_pcpu_stats *p;
		u32 rx_errors = 0, tx_dropped = 0;
		int i;

		for_each_possible_cpu(i) {
			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
			unsigned int start;

			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
			do {
				start = u64_stats_fetch_begin_bh(&p->syncp);
				rxpackets	= p->rx_packets;
				rxbytes		= p->rx_bytes;
				rxmulticast	= p->rx_multicast;
				txpackets	= p->tx_packets;
				txbytes		= p->tx_bytes;
			} while (u64_stats_fetch_retry_bh(&p->syncp, start));

			stats->rx_packets	+= rxpackets;
			stats->rx_bytes		+= rxbytes;
			stats->multicast	+= rxmulticast;
			stats->tx_packets	+= txpackets;
			stats->tx_bytes		+= txbytes;
			/* rx_errors & tx_dropped are u32 */
			rx_errors	+= p->rx_errors;
			tx_dropped	+= p->tx_dropped;
		}
		stats->rx_errors  = rx_errors;
		stats->tx_dropped = tx_dropped;
	}
	return stats;
}
Developer: AmesianX, Project: netlink-mmap, Lines of code: 36, Source file: vlan_dev.c

Example 9: caam_qi_shutdown

void caam_qi_shutdown(struct device *qidev)
{
	int i;
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);

	platform_device_unregister(priv->qi_pdev);
}
Developer: AlexShiLucky, Project: linux, Lines of code: 24, Source file: qi.c

Example 10: desc_set_defaults

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}
Developer: AshishNamdev, Project: linux, Lines of code: 24, Source file: irqdesc.c

Example 11: ixgbe_fcoe_ddp_setup

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
//......... remaining code omitted .........
Developer: insop, Project: linux, Lines of code: 101, Source file: ixgbe_fcoe.c

Example 12: tegra_init_timer

static int __init tegra_init_timer(struct device_node *np, bool tegra20)
{
	struct timer_of *to;
	int cpu, ret;

	to = this_cpu_ptr(&tegra_to);
	ret = timer_of_init(np, to);
	if (ret)
		goto out;

	timer_reg_base = timer_of_base(to);

	/*
	 * Configure microsecond timers to have 1MHz clock
	 * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
	 * Uses n+1 scheme
	 */
	switch (timer_of_rate(to)) {
	case 12000000:
		usec_config = 0x000b; /* (11+1)/(0+1) */
		break;
	case 12800000:
		usec_config = 0x043f; /* (63+1)/(4+1) */
		break;
	case 13000000:
		usec_config = 0x000c; /* (12+1)/(0+1) */
		break;
	case 16800000:
		usec_config = 0x0453; /* (83+1)/(4+1) */
		break;
	case 19200000:
		usec_config = 0x045f; /* (95+1)/(4+1) */
		break;
	case 26000000:
		usec_config = 0x0019; /* (25+1)/(0+1) */
		break;
	case 38400000:
		usec_config = 0x04bf; /* (191+1)/(4+1) */
		break;
	case 48000000:
		usec_config = 0x002f; /* (47+1)/(0+1) */
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);

	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
		unsigned int base = tegra_base_for_cpu(cpu, tegra20);
		unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);

		/*
		 * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
		 * parent clock.
		 */
		if (tegra20)
			cpu_to->of_clk.rate = 1000000;

		cpu_to = per_cpu_ptr(&tegra_to, cpu);
		cpu_to->of_base.base = timer_reg_base + base;
		cpu_to->clkevt.cpumask = cpumask_of(cpu);
		cpu_to->clkevt.irq = irq_of_parse_and_map(np, idx);
		if (!cpu_to->clkevt.irq) {
			pr_err("failed to map irq for cpu%d\n", cpu);
			ret = -EINVAL;
			goto out_irq;
		}

		irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
		ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
				  IRQF_TIMER | IRQF_NOBALANCING,
				  cpu_to->clkevt.name, &cpu_to->clkevt);
		if (ret) {
			pr_err("failed to set up irq for cpu%d: %d\n",
			       cpu, ret);
			irq_dispose_mapping(cpu_to->clkevt.irq);
			cpu_to->clkevt.irq = 0;
			goto out_irq;
		}
	}

	sched_clock_register(tegra_read_sched_clock, 32, 1000000);

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
				    "timer_us", 1000000,
				    300, 32, clocksource_mmio_readl_up);
	if (ret)
		pr_err("failed to register clocksource: %d\n", ret);

#ifdef CONFIG_ARM
	register_current_timer_delay(&tegra_delay_timer);
#endif

	ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
				"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
				tegra_timer_stop);
	if (ret)
//......... remaining code omitted .........
Developer: grate-driver, Project: linux, Lines of code: 101, Source file: timer-tegra20.c

Example 13: per_cpu_ptr

static struct cgroup_cpu_stat *cgroup_cpu_stat(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->cpu_stat, cpu);
}
Developer: ReneNyffenegger, Project: linux, Lines of code: 4, Source file: stat.c

Example 14: ipcomp_decompress

static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}
Developer: Albinoman887, Project: pyramid-3.4.10, Lines of code: 67, Source file: xfrm_ipcomp.c

Example 15: dpaa2_eth_get_ethtool_stats

/** Fill in hardware counters, as returned by MC.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	int i = 0;
	int j, k, err;
	int num_cnt;
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	u32 buf_cnt;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 2; j++) {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err != 0)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
		switch (j) {
		case 0:
			num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
			break;
		case 1:
			num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
			break;
		case 2:
			num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
			break;
		}
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;
}
Developer: AlexShiLucky, Project: linux, Lines of code: 88, Source file: dpaa2-ethtool.c


Note: The per_cpu_ptr examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; refer to each project's license before redistributing or reusing the code. Do not reproduce this article without permission.