This article collects typical usage examples of the C++ function qemu_bh_schedule. If you are wondering what exactly qemu_bh_schedule does and how to use it, the curated code examples below should help.
The following presents 15 code examples of the qemu_bh_schedule function, sorted by popularity by default.
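All of the examples share one basic pattern: instead of invoking a completion callback directly (often from an interrupt-style or re-entrant context), the code creates a "bottom half" with qemu_bh_new() and defers the callback with qemu_bh_schedule(), so it runs later from the main loop on a clean stack. Here is a minimal sketch of that pattern, not code from any of the projects below; MyAIOCB, my_bh_cb, my_complete_later, and my_request_done are hypothetical names invented for illustration, and the sketch assumes it is compiled inside a QEMU source tree that provides the QEMUBH API.

/* Sketch of the common bottom-half pattern. MyAIOCB, my_bh_cb,
 * my_complete_later and my_request_done are hypothetical names;
 * qemu_bh_new(), qemu_bh_schedule() and qemu_bh_delete() are the
 * real QEMU API. */
typedef struct MyAIOCB {
    QEMUBH *bh;
    int ret;
} MyAIOCB;

static void my_request_done(MyAIOCB *acb, int ret)
{
    /* invoke the caller's completion callback, release acb, ... */
}

/* Runs later, from the main loop, on a clean stack. */
static void my_bh_cb(void *opaque)
{
    MyAIOCB *acb = opaque;

    qemu_bh_delete(acb->bh);    /* one-shot BH: delete it once it has run */
    acb->bh = NULL;
    my_request_done(acb, acb->ret);
}

/* Called from wherever the operation finishes; defers completion to the BH. */
static void my_complete_later(MyAIOCB *acb, int ret)
{
    acb->ret = ret;
    acb->bh = qemu_bh_new(my_bh_cb, acb);
    qemu_bh_schedule(acb->bh);  /* fires on the next main-loop iteration */
}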
Example 1: tar_aio_readv
/* This is where we get a request from a caller to read something */
static BlockDriverAIOCB *tar_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVTarState *s = bs->opaque;
    SparseCache *sparse;
    int64_t sec_file = sector_num + s->file_sec;
    int64_t start = sector_num * SECTOR_SIZE;
    int64_t end = start + (nb_sectors * SECTOR_SIZE);
    int i;
    TarAIOCB *acb;

    for (i = 0; i < s->sparse_num; i++) {
        sparse = &s->sparse[i];
        if (sparse->start > end) {
            /* We expect the cache entries to be sorted by increasing start */
            break;
        } else if ((sparse->start < start) && (sparse->end <= start)) {
            /* sparse region lies entirely before our offset */
            sec_file -= (sparse->end - sparse->start) / SECTOR_SIZE;
        } else if ((sparse->start <= start) && (sparse->end >= end)) {
            /* all our sectors are sparse */
            char *buf = g_malloc0(nb_sectors * SECTOR_SIZE);

            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);
            return &acb->common;
        } else if (((sparse->start >= start) && (sparse->start < end)) ||
                   ((sparse->end >= start) && (sparse->end < end))) {
            /* we're semi-sparse (worst case) */
            /* let's go synchronous and read all sectors individually */
            char *buf = g_malloc(nb_sectors * SECTOR_SIZE);
            uint64_t offs;

            for (offs = 0; offs < (nb_sectors * SECTOR_SIZE);
                 offs += SECTOR_SIZE) {
                bdrv_pread(bs, (sector_num * SECTOR_SIZE) + offs,
                           buf + offs, SECTOR_SIZE);
            }
            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);    /* the data now lives in qiov; don't leak buf */
            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);
            return &acb->common;
        }
    }

    return bdrv_aio_readv(s->hd, sec_file, qiov, nb_sectors, cb, opaque);
}
Example 2: qxl_render_cursor
/* called from spice server thread context only */
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
{
    QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
    QXLCursor *cursor;
    QEMUCursor *c;

    if (!cmd) {
        return 1;
    }

    if (!dpy_cursor_define_supported(qxl->vga.con)) {
        return 0;
    }

    if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) {
        fprintf(stderr, "%s", __FUNCTION__);
        qxl_log_cmd_cursor(qxl, cmd, ext->group_id);
        fprintf(stderr, "\n");
    }

    switch (cmd->type) {
    case QXL_CURSOR_SET:
        cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
        if (!cursor) {
            return 1;
        }
        if (cursor->chunk.data_size != cursor->data_size) {
            fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__);
            return 1;
        }
        c = qxl_cursor(qxl, cursor);
        if (c == NULL) {
            c = cursor_builtin_left_ptr();
        }
        qemu_mutex_lock(&qxl->ssd.lock);
        if (qxl->ssd.cursor) {
            cursor_put(qxl->ssd.cursor);
        }
        qxl->ssd.cursor = c;
        qxl->ssd.mouse_x = cmd->u.set.position.x;
        qxl->ssd.mouse_y = cmd->u.set.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    case QXL_CURSOR_MOVE:
        qemu_mutex_lock(&qxl->ssd.lock);
        qxl->ssd.mouse_x = cmd->u.position.x;
        qxl->ssd.mouse_y = cmd->u.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    }
    return 0;
}
Example 3: qemu_laio_completion_bh
/* The completion BH fetches completed I/O requests and invokes their
* callbacks.
*
* The function is somewhat tricky because it supports nested event loops, for
* example when a request callback invokes aio_poll(). In order to do this,
* the completion events array and index are kept in qemu_laio_state. The BH
* reschedules itself as long as there are completions pending so it will
* either be called again in a nested event loop or will be called after all
* events have been completed. When there are no events left to complete, the
* BH returns without rescheduling.
*/
static void qemu_laio_completion_bh(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(s, laiocb);
    }
}
Example 4: qemu_notify_event
void qemu_notify_event(void)
{
    if (!qemu_aio_context) {
        return;
    }
    qemu_bh_schedule(qemu_notify_bh);
}
Example 5: continue_after_map_failure
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
Example 6: virtio_net_set_status
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);

    virtio_net_vhost_status(n, status);

    if (!n->tx_waiting) {
        return;
    }

    if (virtio_net_started(n, status) && !n->vhost_started) {
        if (n->tx_timer) {
            qemu_mod_timer(n->tx_timer,
                           qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        } else {
            qemu_bh_schedule(n->tx_bh);
        }
    } else {
        if (n->tx_timer) {
            qemu_del_timer(n->tx_timer);
        } else {
            qemu_bh_cancel(n->tx_bh);
        }
    }
}
Example 7: qemu_aio_complete
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}
Example 8: iscsi_co_generic_cb
static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
{
    struct IscsiTask *iTask = opaque;
    struct scsi_task *task = command_data;

    iTask->complete = 1;
    iTask->status = status;
    iTask->do_retry = 0;
    iTask->task = task;

    if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
        && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
        iTask->do_retry = 1;
        goto out;
    }

    if (status != SCSI_STATUS_GOOD) {
        error_report("iSCSI: Failure. %s", iscsi_get_error(iscsi));
    }

out:
    if (iTask->co) {
        iTask->bh = qemu_bh_new(iscsi_co_generic_bh_cb, iTask);
        qemu_bh_schedule(iTask->bh);
    }
}
Example 9: bh_test_cb
static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}
Example 10: raw_aio_write
static BlockDriverAIOCB *raw_aio_write(BlockDriverState *bs,
        int64_t sector_num, const uint8_t *buf, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    RawAIOCB *acb;

    /*
     * If O_DIRECT is used and the buffer is not aligned fall back
     * to synchronous IO.
     */
    BDRVRawState *s = bs->opaque;

    if (unlikely(s->aligned_buf != NULL && ((uintptr_t) buf % 512))) {
        QEMUBH *bh;
        acb = qemu_aio_get(bs, cb, opaque);
        acb->ret = raw_pwrite(bs, 512 * sector_num, buf, 512 * nb_sectors);
        bh = qemu_bh_new(raw_aio_em_cb, acb);
        qemu_bh_schedule(bh);
        return &acb->common;
    }

    acb = raw_aio_setup(bs, sector_num, (uint8_t *)buf, nb_sectors, cb, opaque);
    if (!acb) {
        return NULL;
    }
    if (qemu_paio_write(&acb->aiocb) < 0) {
        raw_aio_remove(acb);
        return NULL;
    }
    return &acb->common;
}
Example 11: tpm_tis_receive_cb
/*
 * Callback from the TPM to indicate that the response was received.
 */
static void tpm_tis_receive_cb(TPMState *s, uint8_t locty)
{
    TPMTISEmuState *tis = &s->s.tis;

    assert(s->locty_number == locty);

    qemu_bh_schedule(tis->bh);
}
Example 12: qemu_laio_completion_cb
static void qemu_laio_completion_cb(EventNotifier *e)
{
    struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_bh_schedule(s->completion_bh);
    }
}
Example 13: blk_handle_requests
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example 14: qlooper_addPendingIo
static void
qlooper_addPendingIo(QLooper* looper, QLoopIo* io)
{
    if (looper->io_pending == NULL) {
        qemu_bh_schedule(looper->io_bh);
    }
    io->pendingNext = looper->io_pending;
    looper->io_pending = io;
}
Example 15: iscsi_schedule_bh
static void
iscsi_schedule_bh(IscsiAIOCB *acb)
{
    if (acb->bh) {
        return;
    }
    acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
}
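Unlike the earlier examples, this last one attaches the bottom half to a specific AioContext with aio_bh_new() rather than using qemu_bh_new(), which ties the BH to the global main-loop context. Below is a minimal sketch of that variant; MyState, my_cb, and my_schedule are hypothetical names invented for illustration, while aio_bh_new(), qemu_bh_schedule(), and qemu_bh_delete() are the real QEMU API.

/* Sketch: scheduling a BH on a specific AioContext (e.g. an IOThread's).
 * MyState, my_cb and my_schedule are hypothetical names. */
typedef struct MyState {
    QEMUBH *bh;
} MyState;

static void my_cb(void *opaque)
{
    MyState *s = opaque;

    qemu_bh_delete(s->bh);
    s->bh = NULL;
    /* finish the deferred work here */
}

static void my_schedule(MyState *s, AioContext *ctx)
{
    if (s->bh) {
        return;     /* already pending; same guard as iscsi_schedule_bh */
    }
    s->bh = aio_bh_new(ctx, my_cb, s);
    qemu_bh_schedule(s->bh);
}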