本文整理汇总了C++中prefetch函数的典型用法代码示例。如果您正苦于以下问题:C++ prefetch函数的具体用法?C++ prefetch怎么用?C++ prefetch使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了prefetch函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: prefetch
/* Read one line (terminated by url_newline_code) from the URL's ring
 * buffer into buff, copying at most maxsiz-1 characters and always
 * NUL-terminating.  Returns buff, or NULL on end of data. */
static char *url_buff_gets(URL url, char *buff, int maxsiz)
{
    URL_buff *urlp = (URL_buff *)url;
    unsigned char *ring = urlp->buffer;
    int nl = url_newline_code;
    int rd, wr, ch;
    long count, limit;

    if(urlp->eof)
        return NULL;

    limit = maxsiz - 1;
    if(limit == 0)
        *buff = '\0';
    if(limit <= 0)
        return buff;

    count = 0;
    rd = urlp->rp;
    wr = urlp->wp;
    for(;;)
    {
        if(rd == wr)
        {
            /* Ring drained: publish the write pointer and ask the
             * source for more data. */
            urlp->wp = wr;
            prefetch(urlp);
            wr = urlp->wp;
            if(rd == wr)
            {
                /* Still empty: this is end of data. */
                urlp->eof = 1;
                if(count == 0)
                    return NULL;
                break;
            }
        }
        ch = ring[rd];
        buff[count++] = ch;
        rd = (rd + 1) & BASEMASK;
        /* Stop after the newline character or when the caller's
         * buffer is full. */
        if(ch == nl || count >= limit)
            break;
    }

    buff[count] = '\0';
    urlp->pos += count;
    urlp->rp = rd;
    return buff;
}
示例2: __rcu_process_callbacks
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list.
	 * Interrupts are disabled while the list pointers are spliced so
	 * a concurrent call_rcu() from IRQ context cannot see a torn list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	/* Detach the done sublist; whatever follows donetail stays queued. */
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail) {
		/* The entire list was consumed; reset the enqueue tail too. */
		rcp->curtail = &rcp->rcucblist;
	}
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		/* Warm the cache for the next callback while this one runs. */
		prefetch(next);
		debug_rcu_head_unqueue(list);
		/* Callbacks run with softirqs disabled, one at a time. */
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
示例3: url_buff_read
/* Read up to n bytes from the URL's ring buffer into buff.
 * Returns the number of bytes copied, 0 if EOF was already seen on a
 * previous call, or EOF when the ring is empty and prefetch() produces
 * no further data. */
static long url_buff_read(URL url, void *buff, long n)
{
	URL_buff *urlp = (URL_buff *)url;
	char *s = (char *)buff;
	int r, i, j;

	if(urlp->eof)
		return 0;

	r = urlp->rp;
	if(r == urlp->wp)
	{
		/* Ring empty: try to pull more data from the source. */
		prefetch(urlp);
		if(r == urlp->wp)
		{
			urlp->eof = 1;
			return EOF;
		}
	}

	/* first fragment: contiguous bytes from the read pointer, either
	 * up to the write pointer or to the physical end of the ring when
	 * the valid data wraps around. */
	i = urlp->wp - r;
	if(i < 0)
		i = BASESIZE - r;
	if(i > n)
		i = n;
	memcpy(s, urlp->buffer + r, i);
	r = ((r + i) & BASEMASK);
	/* Done if the request is satisfied, the ring is drained, or the
	 * read pointer did not wrap to 0 (so no second fragment exists). */
	if(i == n || r == urlp->wp || r != 0)
	{
		urlp->rp = r;
		urlp->pos += i;
		return i;
	}

	/* second fragment: bytes from the start of the ring up to wp. */
	j = urlp->wp;
	n -= i;
	s += i;
	if(j > n)
		j = n;
	memcpy(s, urlp->buffer, j);
	urlp->rp = j;
	urlp->pos += i + j;
	return i + j;
}
示例4: write_packet
/* Push the next chunk of req's payload into the endpoint FIFO.
 * Returns the number of bytes queued, 0 when a zero-length packet was
 * armed instead, or -1 on error (a ZLP is still pending or the chunk
 * would overfill the FIFO). */
static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
{
	u8 *buf;
	int length, count, temp;

	/* A previously armed zero-length packet must drain before we can
	 * queue more data on this endpoint. */
	if (unlikely(__raw_readl(imx_ep->imx_usb->base +
		     USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
		D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
			__func__, imx_ep->ep.name);
		return -1;
	}

	buf = req->req.buf + req->req.actual;
	prefetch(buf);
	/* Send at most one FIFO's worth of the remaining payload. */
	length = min(req->req.length - req->req.actual, (u32)imx_ep->fifosize);

	/* Refuse to push beyond the FIFO's hardware capacity. */
	if (imx_fifo_bcount(imx_ep) + length > imx_ep->fifosize) {
		D_TRX(imx_ep->imx_usb->dev, "<%s> packet overfill %s fifo\n",
			__func__, imx_ep->ep.name);
		return -1;
	}

	req->req.actual += length;
	count = length;

	if (!count && req->req.zero) {	/* zlp */
		/* Nothing left to send but the request asks for a terminating
		 * zero-length packet: set EPSTAT_ZLPS so hardware emits it. */
		temp = __raw_readl(imx_ep->imx_usb->base
			+ USB_EP_STAT(EP_NO(imx_ep)));
		__raw_writel(temp | EPSTAT_ZLPS, imx_ep->imx_usb->base
			+ USB_EP_STAT(EP_NO(imx_ep)));
		D_TRX(imx_ep->imx_usb->dev, "<%s> zero packet\n", __func__);
		return 0;
	}

	while (count--) {
		if (count == 0) {	/* last byte */
			/* Set FCTRL_WFR before writing the final byte so the
			 * controller kicks off the transfer (presumably
			 * "write frame ready" -- confirm against the i.MX
			 * USB reference manual). */
			temp = __raw_readl(imx_ep->imx_usb->base
				+ USB_EP_FCTRL(EP_NO(imx_ep)));
			__raw_writel(temp | FCTRL_WFR, imx_ep->imx_usb->base
				+ USB_EP_FCTRL(EP_NO(imx_ep)));
		}
		__raw_writeb(*buf++,
			imx_ep->imx_usb->base + USB_EP_FDAT0(EP_NO(imx_ep)));
	}

	return length;
}
示例5: printf
// Seek the audio prefetch ring to position seekTo and refill it.
// Coalesces queued seek requests: when more than one seek message is
// pending (seekCount > 1) the call bails out early so only the most
// recent seek performs the expensive refill work.
void AudioPrefetch::seek(unsigned seekTo)
{
      // printf("seek %d\n", seekTo);

      #ifdef AUDIOPREFETCH_DEBUG
      printf("AudioPrefetch::seek to:%u seekCount:%d\n", seekTo, seekCount);
      #endif

      // Speedup: More than one seek message pending?
      // Eat up seek messages until we get to the very LATEST one,
      // because all the rest which came before it are irrelevant now,
      // and processing them all was taking extreme time, especially with
      // resampling enabled.
      // In particular, when the user 'slides' the play cursor back and forth
      // there are MANY seek messages in the pipe, and with resampling enabled
      // it was taking minutes to finish seeking. If the user hit play during that time,
      // things were messed up (FIFO underruns, choppy intermittent sound etc).
      // Added by Tim. p3.3.20
      if (seekCount > 1)
      {
        --seekCount;
        return;
      }

      writePos = seekTo;
      bool isFirstPrefetch = true;
      // Refill the whole FIFO, one prefetch() chunk per iteration.
      for (unsigned int i = 0; i < (fifoLength) - 1; ++i)//prevent compiler warning: comparison of signed/unsigned
      {
            // Indicate do a seek command before read, but only on the first pass.
            // Changed by Tim. p3.3.17
            //prefetch();
            prefetch(isFirstPrefetch);
            isFirstPrefetch = false;

            // To help speed things up even more, check the count again. Return if more seek messages are pending.
            // Added by Tim. p3.3.20
            if (seekCount > 1)
            {
              --seekCount;
              return;
            }
      }

      seekPos = seekTo;
      //seekDone = true;
      --seekCount;
      }
示例6: bnxt_qplib_creq_irq
/* CREQ interrupt handler: warm the cache with the completion entry the
 * worker will look at first, then defer all real processing to the
 * tasklet. */
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **pages;
	u32 cons_idx;

	/* Prefetch the CREQ element at the current consumer index. */
	cons_idx = HWQ_CMP(creq->cons, creq);
	pages = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&pages[get_creq_pg(cons_idx)][get_creq_idx(cons_idx)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}
示例7: setReferenceName
// Open the reference genome file named by referenceFilename and
// prefetch it so later accesses are fast.  Progress is reported on
// _progressStream when one is set.  On open failure the error is
// printed and the process exits with status 1.
void GenomeSequence::setup(const char *referenceFilename)
{
    setReferenceName(referenceFilename);

    if (_progressStream) *_progressStream << "open and prefetch reference genome " << referenceFilename << ": " << std::flush;

    // open() returns non-zero on failure; the meaning of the `false`
    // argument is not visible here -- presumably "not read-only" or
    // similar, confirm against the class declaration.
    if (open(false))
    {
        std::cerr << "Failed to open reference genome " << referenceFilename << std::endl;
        std::cerr << errorStr << std::endl;
        exit(1);
    }

    prefetch();

    if (_progressStream) *_progressStream << "done." << std::endl << std::flush;
}
示例8: __efx_rx_packet
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (rx_buf->skb) {
		/* Prefetch the shared info the stack will touch shortly. */
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);

		skb_record_rx_queue(rx_buf->skb, channel->channel);
	}

	/* Checksummed or page-based buffers take the LRO path. */
	if (likely(checksummed || rx_buf->page)) {
		efx_rx_packet_lro(channel, rx_buf, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->skb;
	rx_buf->skb = NULL;
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags: no hardware checksum available on this path. */
	skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
示例9: sfe_cm_recv
/*
 * sfe_cm_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_cm_recv(struct sk_buff *skb)
{
	struct net_device *dev;
#if (SFE_HOOK_ABOVE_BRIDGE)
	struct in_device *in_dev;
#endif

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 * (offset 32 presumably covers the IP header on typical frames --
	 * confirm against the fast-path parser.)
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

#if (SFE_HOOK_ABOVE_BRIDGE)
	/*
	 * Does our input device support IP processing?
	 */
	in_dev = (struct in_device *)dev->ip_ptr;
	if (unlikely(!in_dev)) {
		DEBUG_TRACE("no IP processing for device: %s\n", dev->name);
		return 0;
	}

	/*
	 * Does it have an IP address? If it doesn't then we can't do anything
	 * interesting here!
	 */
	if (unlikely(!in_dev->ifa_list)) {
		DEBUG_TRACE("no IP address for device: %s\n", dev->name);
		return 0;
	}
#endif

	/*
	 * We're only interested in IP packets.
	 */
	if (likely(htons(ETH_P_IP) == skb->protocol)) {
		return sfe_ipv4_recv(dev, skb);
	}

	DEBUG_TRACE("not IP packet\n");
	return 0;
}
示例10: isp1362_write_ptd
/* Copy an endpoint's PTD header (and, for OUT transfers, its payload)
 * into the ISP1362's buffer memory at the endpoint's assigned offset.
 * The epq argument is currently unused here. */
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	/* IN transfers carry no outgoing data; OUT transfers carry ep->length bytes. */
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	_BUG_ON(ep->ptd_offset < 0);

	prefetch(ptd);
	/* Header first, payload immediately after it in chip memory. */
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}
示例11: wait_packet_function_ptr
/* Poll/interrupt hook for the DNA (zero-copy) e1000 path.
 * mode == 1: check whether the next RX descriptor has been written back
 * by hardware (DD bit set) and record that in dna.interrupt_received;
 * returns non-zero when a packet is ready.
 * Any other mode: disable interrupts if enabled and return 0. */
int wait_packet_function_ptr(void *data, int mode) {
	struct e1000_adapter *adapter = (struct e1000_adapter*)data;

	if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] called [mode=%d]\n", mode);

	if(mode == 1) {
		struct e1000_ring *rx_ring = adapter->rx_ring;
		union e1000_rx_desc_extended *rx_desc;
		u16 i = E1000_READ_REG(&adapter->hw, E1000_RDT(0));

		/* Very important: update the value from the register set from userland.
		 * Here i is the last I've read (zero-copy implementation) */
		if(++i == rx_ring->count) i = 0;
		/* Here i is the next I have to read */

		rx_ring->next_to_clean = i;

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, rx_ring->next_to_clean);
		if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Check if a packet is arrived\n");

		prefetch(rx_desc);

		/* DD (descriptor done) bit set means hardware wrote this slot. */
		if(!(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD)) {
			adapter->dna.interrupt_received = 0;

#if 0
			if(!adapter->dna.interrupt_enabled) {
				e1000_irq_enable(adapter), adapter->dna.interrupt_enabled = 1;
				if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Packet not arrived yet: enabling interrupts\n");
			}
#endif
		} else
			adapter->dna.interrupt_received = 1;

		return(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD);
	} else {
		/* Non-poll mode: quiesce interrupts for this adapter. */
		if(adapter->dna.interrupt_enabled) {
			e1000_irq_disable(adapter);
			adapter->dna.interrupt_enabled = 0;
			if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Disabled interrupts\n");
		}

		return(0);
	}
}
示例12: s3c2410_udc_write_packet
/*
 * s3c2410_udc_write_packet
 *
 * Copy up to @max bytes of the request's remaining payload into the
 * given FIFO register and advance req->req.actual.  Returns the number
 * of bytes written.
 */
static inline int s3c2410_udc_write_packet(int fifo,
		struct s3c2410_request *req,
		unsigned max)
{
	u8 *src = req->req.buf + req->req.actual;
	unsigned count = min(req->req.length - req->req.actual, max);

	prefetch(src);

	dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__,
		req->req.actual, req->req.length, count, req->req.actual + count);

	req->req.actual += count;

	udelay(5);
	writesb(base_addr + fifo, src, count);

	return count;
}
示例13: siftDownSingleStep
// One step of binary-heap sift-down at index `root` (1-based layout,
// children of i at 2i and 2i+1, heap occupying a[1..end]).  When the
// larger child beats the root they are swapped, the child's index is
// appended to the work queue, and the grandchild position the next
// step will examine is prefetched.
void siftDownSingleStep(ssize_t const end, ssize_t const root) {
    ssize_t const left = root * 2;
    ssize_t const right = left + 1;

    if (right > end) {
        // At most one child can exist, and only at index `left`.
        if (left == end && compOp(a[root], Below, a[left]))
            std::swap(a[root], a[left]);
        return;
    }

    // Both children exist: compOp picks the larger of the two.
    ssize_t const maxChild = left + compOp(a[left], Below, a[right]);
    if (compOp(a[root], Below, a[maxChild])) {
        std::swap(a[root], a[maxChild]);
        queue[queueStoreIndex++] = maxChild;
        prefetch(a + std::min(maxChild * 2, end));
    }
}
示例14: pcap_dispatch
/* netmap-backed pcap_dispatch(): drain up to cnt packets (cnt == 0
 * means "no limit") from all RX rings of the interface, invoking
 * callback once per packet.  Returns the number of packets handled. */
int
pcap_dispatch(pcap_t *p, int cnt, pcap_handler callback, u_char *user)
{
	struct pcap_ring *pme = p;
	struct my_ring *me = &pme->me;
	int got = 0;
	u_int si;

	ND("cnt %d", cnt);
	if (cnt == 0)
		cnt = -1;
	/* scan all rings */
	for (si = me->begin; si < me->end; si++) {
		struct netmap_ring *ring = NETMAP_RXRING(me->nifp, si);
		ND("ring has %d pkts", ring->avail);
		if (ring->avail == 0)
			continue;
		/* All packets from this ring share its hardware timestamp. */
		pme->hdr.ts = ring->ts;
		/*
		 * XXX a proper prefetch should be done as
		 *	prefetch(i); callback(i-1); ...
		 */
		while ((cnt == -1 || cnt != got) && ring->avail > 0) {
			u_int i = ring->cur;
			u_int idx = ring->slot[i].buf_idx;
			/* Buffer indices 0 and 1 appear to be reserved --
			 * flag them and pause rather than crash. */
			if (idx < 2) {
				D("%s bogus RX index %d at offset %d",
					me->nifp->ni_name, idx, i);
				sleep(2);
			}
			u_char *buf = (u_char *)NETMAP_BUF(ring, idx);
			prefetch(buf);
			/* No snaplen handling: caplen always equals len here. */
			pme->hdr.len = pme->hdr.caplen = ring->slot[i].len;

			// D("call %p len %d", p, me->hdr.len);
			callback(user, &pme->hdr, buf);
			ring->cur = NETMAP_RING_NEXT(ring, i);
			ring->avail--;
			got++;
		}
	}
	pme->st.ps_recv += got;
	return got;
}
示例15: check_cqe
/**
* this function polls the CQ, and extracts the needed fields
* upon CQE error state it will return -1
* if a bad checksum packet or a filler bit it will return VMA_MP_RQ_BAD_PACKET
*/
int cq_mgr_mp::poll_mp_cq(uint16_t &size, uint32_t &strides_used,
uint32_t &flags, struct mlx5_cqe64 *&out_cqe64)
{
struct mlx5_cqe64 *cqe= check_cqe();
if (likely(cqe)) {
if (unlikely(MLX5_CQE_OPCODE(cqe->op_own) != MLX5_CQE_RESP_SEND)) {
cq_logdbg("Warning op_own is %x", MLX5_CQE_OPCODE(cqe->op_own));
// optimize checks in ring by setting size non zero
if (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) {
cq_logdbg("poll_length, CQE response error, "
"syndrome=0x%x, vendor syndrome error=0x%x, "
"HW syndrome 0x%x, HW syndrome type 0x%x\n",
((struct mlx5_err_cqe *)cqe)->syndrome,
((struct mlx5_err_cqe *)cqe)->vendor_err_synd,
((struct mlx5_err_cqe *)cqe)->hw_err_synd,
((struct mlx5_err_cqe *)cqe)->hw_synd_type);
}
size = 1;
m_p_cq_stat->n_rx_pkt_drop++;
return -1;
}
m_p_cq_stat->n_rx_pkt_drop += cqe->sop_qpn.sop;
out_cqe64 = cqe;
uint32_t stride_byte_cnt = ntohl(cqe->byte_cnt);
strides_used = (stride_byte_cnt & MP_RQ_NUM_STRIDES_FIELD_MASK) >>
MP_RQ_NUM_STRIDES_FIELD_SHIFT;
flags = (!!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) * IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK) |
(!!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) * IBV_EXP_CQ_RX_IP_CSUM_OK);
if (likely(flags == UDP_OK_FLAGS)) {
size = stride_byte_cnt & MP_RQ_BYTE_CNT_FIELD_MASK;
} else {
// if CSUM is bad it can be either filler or bad packet
flags = VMA_MP_RQ_BAD_PACKET;
size = 1;
if (stride_byte_cnt & MP_RQ_FILLER_FIELD_MASK) {
m_p_cq_stat->n_rx_pkt_drop++;
}
}
++m_mlx5_cq.cq_ci;
prefetch((uint8_t*)m_mlx5_cq.cq_buf + ((m_mlx5_cq.cq_ci & (m_mlx5_cq.cqe_count - 1)) << m_mlx5_cq.cqe_size_log));
} else {