This article collects typical usage examples of the RING_GET_REQUEST function in C/C++. If you have been wondering how RING_GET_REQUEST is used in practice, what calling it looks like, or where to find real code that uses it, the hand-picked examples below should help.
The following sections present 15 code examples of RING_GET_REQUEST, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps surface better C/C++ code examples.
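Before diving into the examples, a quick orientation may help: RING_GET_REQUEST comes from Xen's public ring.h and returns a pointer to the request slot at a given index (taken modulo the ring size) in a shared ring generated by DEFINE_RING_TYPES. The caller manages the index itself, typically req_prod_pvt on the producer side and req_cons on the consumer side. The minimal producer-side sketch below shows the pattern most of the examples follow; my_front_ring_t, my_request_t and fill_request() are placeholders, not real Xen APIs.

/* Minimal producer-side sketch of the pattern most examples below follow.
 * my_front_ring_t / my_request_t would come from DEFINE_RING_TYPES(my, ...);
 * fill_request() is a placeholder for filling in the request body. */
void queue_one_request(my_front_ring_t *ring, evtchn_port_t port)
{
    my_request_t *req;
    int notify;

    if (RING_FULL(ring))
        return;                              /* no free slot; retry later */

    /* Claim the next free slot via the private producer index... */
    req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
    fill_request(req);                       /* placeholder */
    ring->req_prod_pvt++;

    /* ...then publish it and notify the backend only when required. */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
    if (notify)
        notify_remote_via_evtchn(port);
}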
Example 1: xen_block_get_request
static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }

    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();

    return 0;
}
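For context, the rc index passed to a copy helper like xen_block_get_request() normally comes from the backend's consumer pointer. The loop below is a simplified sketch of how a QEMU dataplane-style backend typically drives such a helper, not the actual QEMU code; handle_request() is a hypothetical per-request handler and error handling is omitted.

/* Simplified consumer-loop sketch; handle_request() is hypothetical. */
static void consume_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc = dataplane->rings.common.req_cons;
    RING_IDX rp = dataplane->rings.common.sring->req_prod;

    xen_rmb(); /* make sure we see requests up to rp */

    while (rc != rp) {
        XenBlockRequest request;

        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break; /* frontend is misbehaving; stop consuming */
        }
        if (xen_block_get_request(dataplane, &request, rc)) {
            break;
        }
        dataplane->rings.common.req_cons = ++rc;
        handle_request(dataplane, &request); /* hypothetical */
    }
}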
Example 2: netbk_gop_skb
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
                         struct netrx_pending_operations *npo)
{
    struct xenvif *vif = netdev_priv(skb->dev);
    int nr_frags = skb_shinfo(skb)->nr_frags;
    int i;
    struct xen_netif_rx_request *req;
    struct netbk_rx_meta *meta;
    unsigned char *data;
    int head = 1;
    int old_meta_prod;

    old_meta_prod = npo->meta_prod;

    /* Set up a GSO prefix descriptor, if necessary */
    if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;
        meta->gso_size = skb_shinfo(skb)->gso_size;
        meta->size = 0;
        meta->id = req->id;
    }

    req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    meta = npo->meta + npo->meta_prod++;

    if (!vif->gso_prefix)
        meta->gso_size = skb_shinfo(skb)->gso_size;
    else
        meta->gso_size = 0;

    meta->size = 0;
    meta->id = req->id;
    npo->copy_off = 0;
    npo->copy_gref = req->gref;

    data = skb->data;
    while (data < skb_tail_pointer(skb)) {
        unsigned int offset = offset_in_page(data);
        unsigned int len = PAGE_SIZE - offset;

        if (data + len > skb_tail_pointer(skb))
            len = skb_tail_pointer(skb) - data;

        netbk_gop_frag_copy(vif, skb, npo,
                            virt_to_page(data), len, offset, &head);
        data += len;
    }

    for (i = 0; i < nr_frags; i++) {
        netbk_gop_frag_copy(vif, skb, npo,
                            skb_frag_page(&skb_shinfo(skb)->frags[i]),
                            skb_frag_size(&skb_shinfo(skb)->frags[i]),
                            skb_shinfo(skb)->frags[i].page_offset,
                            &head);
    }

    return npo->meta_prod - old_meta_prod;
}
Example 3: async_syscall
// TODO: right now there is one channel (remote); in the future, the caller
// may specify local, which will cause it to give up the core to do the work.
// Creating additional remote channels also lets the caller prioritize work,
// because the default kernel policy is to round-robin between them.
int async_syscall(arsc_channel_t* chan, syscall_req_t* req, syscall_desc_t** desc_ptr2)
{
    // Note that this assumes one global frontring (TODO).
    // Abort if there is no room for our request.  The ring size is currently 64.
    // We could spin until a slot is free, but that could deadlock if this same
    // thread is supposed to consume the requests it is waiting on later.
    syscall_desc_t* desc = malloc(sizeof(syscall_desc_t));
    desc->channel = chan;
    syscall_front_ring_t *fr = &(desc->channel->sysfr);

    // TODO: could do this locklessly using CAS, but that may change with local async calls
    struct mcs_lock_qnode local_qn = {0};
    mcs_lock_lock(&(chan->aclock), &local_qn);
    if (RING_FULL(fr)) {
        /* Release the lock and the descriptor before bailing out. */
        mcs_lock_unlock(&(chan->aclock), &local_qn);
        free(desc);
        errno = EBUSY;
        return -1;
    }

    // req_prod_pvt comes in as the previously produced item.  Increment to the
    // next available slot, which is the one we will fill in.  At some point we
    // need to listen for the responses.
    desc->idx = ++(fr->req_prod_pvt);
    syscall_req_t* r = RING_GET_REQUEST(fr, desc->idx);
    // CAS on req->status perhaps
    req->status = REQ_alloc;

    memcpy(r, req, sizeof(syscall_req_t));
    r->status = REQ_ready;

    // Push our updates to syscallfrontring.req_prod_pvt.
    // Note: it is OK to push without protection since it is atomic and the kernel
    // won't process any requests until they are marked REQ_ready (also atomic).
    RING_PUSH_REQUESTS(fr);

    mcs_lock_unlock(&desc->channel->aclock, &local_qn);
    *desc_ptr2 = desc;
    return 0;
}
Example 4: blk_get_request
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64, rc));
        break;
    }
    return 0;
}
Example 5: local_irq_save
/*
 * Gets a free TX request for copying data to the backend.
 */
static inline struct netif_tx_request *netfront_get_page(struct netfront_dev *dev)
{
    struct netif_tx_request *tx;
    unsigned short id;
    struct net_txbuffer* buf;
    int flags;

    local_irq_save(flags);
    if (unlikely(!trydown(&dev->tx_sem))) {
        local_irq_restore(flags);
        return NULL; /* we ran out of available pages */
    }
    id = get_id_from_freelist(dev->tx_freelist);
    buf = &dev->tx_buffers[id];
    local_irq_restore(flags);

    tx = RING_GET_REQUEST(&dev->tx, dev->tx.req_prod_pvt++);
    tx->offset = 0;
    tx->size = 0;
    tx->id = id;
    tx->flags = 0;
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
    tx->gref = buf->gref;
#else
    tx->gref = buf->gref = GRANT_INVALID_REF;
#endif
#ifdef HAVE_LWIP
    buf->pbuf = NULL;
#endif
    return tx;
}
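A caller would pair netfront_get_page() with a push/notify step once the returned slot is filled in. Below is a rough transmit-path sketch under stated assumptions: netfront_fill_tx_buffer() is a hypothetical helper that copies the payload into the TX buffer for tx->id and sets up the grant reference; it is not part of the original driver.

/* Hedged sketch of a transmit path built on netfront_get_page(). */
static int netfront_xmit_one(struct netfront_dev *dev, const void *data, size_t len)
{
    struct netif_tx_request *tx;
    int notify;

    tx = netfront_get_page(dev);
    if (!tx)
        return -1;                           /* TX ring / freelist is full */

    /* Hypothetical helper: copy 'data' into the buffer backing tx->id and
     * grant it to the backend, filling tx->gref as needed. */
    netfront_fill_tx_buffer(dev, tx, data, len);
    tx->size = len;
    tx->flags = 0;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);
    return 0;
}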
Example 6: init_rx_buffers
void init_rx_buffers(struct netfront_dev *dev)
{
    int i, requeue_idx;
    netif_rx_request_t *req;
    int notify;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
        struct net_buffer* buf = &dev->rx_buffers[requeue_idx];

        req = RING_GET_REQUEST(&dev->rx, requeue_idx);
        buf->gref = req->gref =
            gnttab_grant_access(dev->dom, virt_to_mfn(buf->page), 0);
        req->id = requeue_idx;
        requeue_idx++;
    }

    dev->rx.req_prod_pvt = requeue_idx;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

    dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1;
}
Example 7: send_request_to_dom0
int send_request_to_dom0(void)
{
    struct as_request *ring_req;
    int notify;
    static int reqid = 9;

    ring_req = RING_GET_REQUEST(&(info.ring), info.ring.req_prod_pvt);
    ring_req->id = reqid;
    ring_req->operation = reqid;
    ring_req->status = reqid;

    printk(KERN_DEBUG "\nxen:DomU: Fill in IDX-%d, with id=%d, op=%d, st=%d",
           info.ring.req_prod_pvt, ring_req->id, ring_req->operation, ring_req->status);
    reqid++;

    info.ring.req_prod_pvt += 1;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&(info.ring), notify);
    if (notify) {
        printk(KERN_DEBUG "\nxen: DomU: sent a req to Dom0");
        notify_remote_via_irq(info.irq);
    } else {
        printk(KERN_DEBUG "\nxen:DomU: No notify req to Dom0");
        notify_remote_via_irq(info.irq);
    }
    printk("...\n");
    return 0;
}
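The Dom0 side of this tutorial-style pair would consume the request from its back ring and answer it. The sketch below reuses the same naming as the DomU code, but the back-ring layout of info.ring, the as_response type and the interrupt-handler wiring are assumptions for illustration, not code from the original tutorial.

/* Hedged sketch of the Dom0 (backend) side: consume requests pushed by
 * send_request_to_dom0() and answer each one.  The back-ring view of
 * info.ring and struct as_response are assumed. */
static irqreturn_t as_int(int irq, void *dev_id)
{
    RING_IDX rc, rp;
    struct as_request req;
    struct as_response *resp;
    int notify;

    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    rmb(); /* make sure we see requests up to rp */

    while (rc != rp) {
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        info.ring.req_cons = ++rc;

        printk(KERN_DEBUG "xen:Dom0: got request id=%d op=%d\n",
               (int)req.id, (int)req.operation);

        /* Produce a matching response on the same shared ring. */
        resp = RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt);
        resp->id = req.id;
        resp->operation = req.operation;
        resp->status = 0;
        info.ring.rsp_prod_pvt++;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (notify)
            notify_remote_via_irq(info.irq);
    }
    return IRQ_HANDLED;
}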
Example 8: xpvtap_user_request_push
/*
 * xpvtap_user_request_push()
 */
static int
xpvtap_user_request_push(xpvtap_state_t *state, blkif_request_t *req,
    uint_t uid)
{
    blkif_request_t *outstanding_req;
    blkif_front_ring_t *uring;
    blkif_request_t *target;
    xpvtap_user_map_t *map;

    uring = &state->bt_user_ring.ur_ring;
    map = &state->bt_map;

    target = RING_GET_REQUEST(uring, uring->req_prod_pvt);

    /*
     * Save request from the frontend. used for ID mapping and unmap
     * on response/cleanup
     */
    outstanding_req = &map->um_outstanding_reqs[uid];
    bcopy(req, outstanding_req, sizeof (*outstanding_req));

    /* put the request on the user ring */
    bcopy(req, target, sizeof (*req));
    target->id = (uint64_t)uid;
    uring->req_prod_pvt++;

    pollwakeup(&state->bt_pollhead, POLLIN | POLLRDNORM);

    return (DDI_SUCCESS);
}
Example 9: PutRequestsOnRing
/* called with urb ring lock held */
static VOID
PutRequestsOnRing(PXENUSB_DEVICE_DATA xudd) {
    partial_pvurb_t *partial_pvurb;
    uint16_t id;
    int notify;

    FUNCTION_ENTER();
    FUNCTION_MSG("IRQL = %d\n", KeGetCurrentIrql());

    while ((partial_pvurb = (partial_pvurb_t *)RemoveHeadList((PLIST_ENTRY)&xudd->partial_pvurb_queue))
           != (partial_pvurb_t *)&xudd->partial_pvurb_queue) {
        FUNCTION_MSG("partial_pvurb = %p\n", partial_pvurb);
        /* if this partial_pvurb is cancelling another we don't need to check if the
         * cancelled partial_pvurb is on the ring - that is taken care of in HandleEvent */
        id = get_id_from_freelist(xudd->req_id_ss);
        if (id == (uint16_t)-1) {
            FUNCTION_MSG("no free ring slots\n");
            InsertHeadList(&xudd->partial_pvurb_queue, &partial_pvurb->entry);
            break;
        }
        InsertTailList(&xudd->partial_pvurb_ring, &partial_pvurb->entry);
        xudd->partial_pvurbs[id] = partial_pvurb;
        partial_pvurb->req.id = id;
        *RING_GET_REQUEST(&xudd->urb_ring, xudd->urb_ring.req_prod_pvt) = partial_pvurb->req;
        xudd->urb_ring.req_prod_pvt++;
    }

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xudd->urb_ring, notify);
    if (notify) {
        FUNCTION_MSG("Notifying\n");
        XnNotify(xudd->handle, xudd->event_channel);
    }
    FUNCTION_EXIT();
}
Example 10: xenvif_get_extras
static int xenvif_get_extras(struct xenvif *vif,
                             struct xen_netif_extra_info *extras,
                             int work_to_do)
{
    struct xen_netif_extra_info extra;
    RING_IDX cons = vif->tx.req_cons;

    do {
        if (unlikely(work_to_do-- <= 0)) {
            netdev_err(vif->dev, "Missing extra info\n");
            xenvif_fatal_tx_err(vif);
            return -EBADR;
        }

        memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
               sizeof(extra));
        if (unlikely(!extra.type ||
                     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
            vif->tx.req_cons = ++cons;
            netdev_err(vif->dev,
                       "Invalid extra type: %d\n", extra.type);
            xenvif_fatal_tx_err(vif);
            return -EINVAL;
        }

        memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
        vif->tx.req_cons = ++cons;
    } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

    return work_to_do;
}
Example 11: ixp_recover
static int ixp_recover(struct ixpfront_info *info)
{
    int i;
    struct ixp_request *req;
    struct ixp_shadow *copy;
    int j;

    /* Stage 1: Make a safe copy of the shadow state. */
    copy = kmalloc(sizeof(info->shadow),
                   GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
    if (!copy)
        return -ENOMEM;
    memcpy(copy, info->shadow, sizeof(info->shadow));

    /* Stage 2: Set up free list. */
    memset(&info->shadow, 0, sizeof(info->shadow));
    for (i = 0; i < IXP_RING_SIZE; i++)
        info->shadow[i].req.id = i+1;
    info->shadow_free = info->ring.req_prod_pvt;
    info->shadow[IXP_RING_SIZE-1].req.id = 0x0fffffff;

    /* Stage 3: Find pending requests and requeue them. */
    for (i = 0; i < IXP_RING_SIZE; i++) {
        /* Not in use? */
        if (copy[i].req_page == NULL)
            continue;

        /* Grab a request slot and copy shadow state into it. */
        req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
        *req = copy[i].req;

        /* We get a new request id, and must reset the shadow state. */
        req->id = get_id_from_freelist(info);
        memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

        /* Rewrite any grant references invalidated by susp/resume. */
        for (j = 0; j < req->nr_segments; j++)
            gnttab_grant_foreign_access_ref(
                req->seg[j].gref,
                info->xbdev->otherend_id,
                pfn_to_mfn(info->shadow[req->id].frame[j]),
                0);
        info->shadow[req->id].req = *req;

        info->ring.req_prod_pvt++;
    }

    kfree(copy);

    xenbus_switch_state(info->xbdev, XenbusStateConnected);

    /* Now safe for us to use the shared ring */
    info->connected = IXP_STATE_CONNECTED;

    /* Send off requeued requests */
    flush_requests(info);

    return 0;
}
Example 12: blkfront_aio
/* Issue an aio */
void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
    struct blkfront_dev *dev = aiocbp->aio_dev;
    struct blkif_request *req;
    RING_IDX i;
    int notify;
    int n, j;
    uintptr_t start, end;

    // Can't io at non-sector-aligned location
    ASSERT(!(aiocbp->aio_offset & (dev->info.sector_size-1)));
    // Can't io non-sector-sized amounts
    ASSERT(!(aiocbp->aio_nbytes & (dev->info.sector_size-1)));
    // Can't io non-sector-aligned buffer
    ASSERT(!((uintptr_t) aiocbp->aio_buf & (dev->info.sector_size-1)));

    start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
    end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes + PAGE_SIZE - 1) & PAGE_MASK;
    aiocbp->n = n = (end - start) / PAGE_SIZE;

    /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB,
     * so max 44KB can't happen */
    ASSERT(n <= BLKIF_MAX_SEGMENTS_PER_REQUEST);

    blkfront_wait_slot(dev);
    i = dev->ring.req_prod_pvt;
    req = RING_GET_REQUEST(&dev->ring, i);

    req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
    req->nr_segments = n;
    req->handle = dev->handle;
    req->id = (uintptr_t) aiocbp;
    req->sector_number = aiocbp->aio_offset / 512;

    for (j = 0; j < n; j++) {
        req->seg[j].first_sect = 0;
        req->seg[j].last_sect = PAGE_SIZE / 512 - 1;
    }
    req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) / 512;
    req->seg[n-1].last_sect = (((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / 512;

    for (j = 0; j < n; j++) {
        uintptr_t data = start + j * PAGE_SIZE;
        if (!write) {
            /* Trigger CoW if needed */
            *(char*)(data + (req->seg[j].first_sect << 9)) = 0;
            barrier();
        }
        aiocbp->gref[j] = req->seg[j].gref =
            gnttab_grant_access(dev->dom, virtual_to_mfn(data), write);
    }

    dev->ring.req_prod_pvt = i + 1;

    wmb();
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
    if (notify) notify_remote_via_evtchn(dev->evtchn);
}
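A caller fills in a blkfront_aiocb and hands it to blkfront_aio(); completion is reported asynchronously. Here is a hedged usage sketch following Mini-OS conventions: the aio_cb field and blkfront_aio_poll() are assumed to exist as in Mini-OS, and my_read_done() is a hypothetical completion callback.

/* Hedged usage sketch: issue an asynchronous, sector-aligned read. */
static char buf[4096] __attribute__((aligned(4096)));

void read_first_page(struct blkfront_dev *dev)
{
    struct blkfront_aiocb aiocb = {
        .aio_dev    = dev,
        .aio_buf    = buf,
        .aio_nbytes = sizeof(buf),
        .aio_offset = 0,
        .aio_cb     = my_read_done,   /* hypothetical completion callback */
    };

    blkfront_aio(&aiocb, 0 /* read */);
    /* Completion is reported later, e.g. when blkfront_aio_poll(dev) runs. */
}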
Example 13: xenio_blkif_get_request
/**
 * Utility function that retrieves a request using @idx as the ring index,
 * copying it to @dst in a H/W independent way.
 *
 * @param blkif the block interface
 * @param dst address that receives the request
 * @param idx the index of the request in the ring
 */
static inline void
xenio_blkif_get_request(struct td_xenblkif * const blkif,
                        blkif_request_t *const dst, const RING_IDX idx)
{
    blkif_back_rings_t * rings;

    ASSERT(blkif);
    ASSERT(dst);

    rings = &blkif->rings;

    switch (blkif->proto) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_request_t *src;
        src = RING_GET_REQUEST(&rings->native, idx);
        memcpy(dst, src, sizeof(blkif_request_t));
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_request_t *src;
        src = RING_GET_REQUEST(&rings->x86_32, idx);
        blkif_get_req(dst, src);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_request_t *src;
        src = RING_GET_REQUEST(&rings->x86_64, idx);
        blkif_get_req(dst, src);
        break;
    }
    default:
        /*
         * TODO log error
         */
        ASSERT(0);
    }
}
Example 14: blk_get_request
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }

    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
Example 15: xenvif_tx_err
static void xenvif_tx_err(struct xenvif *vif,
                          struct xen_netif_tx_request *txp, RING_IDX end)
{
    RING_IDX cons = vif->tx.req_cons;

    do {
        make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
        if (cons == end)
            break;
        txp = RING_GET_REQUEST(&vif->tx, cons++);
    } while (1);

    vif->tx.req_cons = cons;
}