本文整理汇总了C++中qemu_blockalign函数的典型用法代码示例。如果您正苦于以下问题:C++ qemu_blockalign函数的具体用法?C++ qemu_blockalign怎么用?C++ qemu_blockalign使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了qemu_blockalign函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: qed_write_header
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * @s:      QED driver state; s->bs->file is the image file being updated
 * @cb:     Completion callback invoked once the header update finishes
 * @opaque: Opaque argument passed through to @cb
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
void *opaque)
{
/* We must write full sectors for O_DIRECT but cannot necessarily generate
 * the data following the header if an unrecognized compat feature is
 * active. Therefore, first read the sectors containing the header, update
 * them, and write back.
 */
/* Round the header size up to whole sectors */
int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
BDRV_SECTOR_SIZE;
size_t len = nsectors * BDRV_SECTOR_SIZE;
/* Per-operation state; presumably freed by the completion chain via
 * gencb — TODO confirm in qed_write_header_read_cb / its continuation. */
QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
cb, opaque);
write_header_cb->s = s;
write_header_cb->nsectors = nsectors;
/* Buffer aligned to the device's O_DIRECT requirements */
write_header_cb->buf = qemu_blockalign(s->bs, len);
write_header_cb->iov.iov_base = write_header_cb->buf;
write_header_cb->iov.iov_len = len;
qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);
/* Step 1: read the current header sectors; qed_write_header_read_cb
 * updates the known fields and issues the write-back. */
bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
qed_write_header_read_cb, write_header_cb);
}
示例2: qemu_blockalign
/**
 * Allocate a block-aligned I/O buffer of @len bytes filled with @pattern.
 *
 * When the global 'misalign' flag is set, the buffer is over-allocated by
 * MISALIGN_OFFSET bytes and the returned pointer is advanced by that offset
 * so callers exercise the driver's unaligned-request paths.
 *
 * NOTE(review): a misaligned pointer must be rewound by MISALIGN_OFFSET
 * before being handed to qemu_vfree() — confirm the matching free helper
 * does this.
 */
static void *qemu_io_alloc(size_t len, int pattern)
{
    char *buf;

    if (misalign) {
        len += MISALIGN_OFFSET;
    }
    /* qemu_blockalign() honours the device's O_DIRECT alignment needs */
    buf = qemu_blockalign(bs, len);
    memset(buf, pattern, len);
    if (misalign) {
        /* Use char * arithmetic: arithmetic on void * is a GNU extension
         * and ill-formed ISO C (the original did 'void *buf += ...'). */
        buf += MISALIGN_OFFSET;
    }
    return buf;
}
示例3: vhdx_log_read_desc
/* Reads the log header, and subsequent descriptors (if any). This
 * will allocate all the space for buffer, which must be NULL when
 * passed into this function. Each descriptor will also be validated,
 * and error returned if any are invalid.
 *
 * On success, *buffer owns the aligned allocation and the caller must
 * release it with qemu_vfree(). Returns 0 on success, negative errno on
 * failure. */
static int vhdx_log_read_desc(BlockDriverState *bs, BDRVVHDXState *s,
                              VHDXLogEntries *log, VHDXLogDescEntries **buffer)
{
    int ret = 0;
    uint32_t desc_sectors;
    uint32_t sectors_read;
    VHDXLogEntryHeader hdr;
    VHDXLogDescEntries *desc_entries = NULL;
    int i;

    assert(*buffer == NULL);

    ret = vhdx_log_peek_hdr(bs, log, &hdr);
    if (ret < 0) {
        goto exit;
    }
    vhdx_log_entry_hdr_le_import(&hdr);
    if (vhdx_log_hdr_is_valid(log, &hdr, s) == false) {
        ret = -EINVAL;
        goto exit;
    }

    desc_sectors = vhdx_compute_desc_sectors(hdr.descriptor_count);
    desc_entries = qemu_blockalign(bs, desc_sectors * VHDX_LOG_SECTOR_SIZE);

    /* Fixed: the third argument had been mangled to '§ors_read' (an HTML
     * '&sect;' entity eating the text "&sect"); it must be the address of
     * the sectors_read out-parameter. */
    ret = vhdx_log_read_sectors(bs, log, &sectors_read, desc_entries,
                                desc_sectors, false);
    if (ret < 0) {
        goto free_and_exit;
    }
    if (sectors_read != desc_sectors) {
        /* Short read: the log did not contain all expected descriptors */
        ret = -EINVAL;
        goto free_and_exit;
    }

    /* put in proper endianness, and validate each desc */
    for (i = 0; i < hdr.descriptor_count; i++) {
        vhdx_log_desc_le_import(&desc_entries->desc[i]);
        if (vhdx_log_desc_is_valid(&desc_entries->desc[i], &hdr) == false) {
            ret = -EINVAL;
            goto free_and_exit;
        }
    }

    /* Ownership of desc_entries transfers to the caller on success */
    *buffer = desc_entries;
    goto exit;

free_and_exit:
    qemu_vfree(desc_entries);
exit:
    return ret;
}
示例4: g_malloc0
/* Create a qcow2 metadata cache of num_tables entries, each backed by a
 * cluster-sized buffer aligned for O_DIRECT access to the image file. */
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c = g_malloc0(sizeof(*c));
    int idx;

    c->size = num_tables;
    c->entries = g_malloc0(num_tables * sizeof(*c->entries));

    for (idx = 0; idx < num_tables; idx++) {
        c->entries[idx].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}
示例5: raw_open_common
/*
 * Common open path for the POSIX raw protocol driver.
 *
 * Translates block-layer flags (bdrv_flags) into open(2) flags, opens the
 * file, and — for O_DIRECT mode — allocates an aligned bounce buffer for
 * unaligned requests. Returns 0 on success, negative errno on failure.
 */
static int raw_open_common(BlockDriverState *bs, const char *filename,
int bdrv_flags, int open_flags)
{
BDRVRawState *s = bs->opaque;
int fd, ret;
posix_aio_init();
s->lseek_err_cnt = 0;
/* Start from caller-provided flags, force binary mode, then rebuild the
 * access mode from bdrv_flags below. */
s->open_flags = open_flags | O_BINARY;
s->open_flags &= ~O_ACCMODE;
if ((bdrv_flags & BDRV_O_ACCESS) == BDRV_O_RDWR) {
s->open_flags |= O_RDWR;
} else {
s->open_flags |= O_RDONLY;
bs->read_only = 1;
}
/* Use O_DSYNC for write-through caching, no flags for write-back caching,
 * and O_DIRECT for no caching. */
if ((bdrv_flags & BDRV_O_NOCACHE))
s->open_flags |= O_DIRECT;
else if (!(bdrv_flags & BDRV_O_CACHE_WB))
s->open_flags |= O_DSYNC;
s->fd = -1;
fd = open(filename, s->open_flags, 0644);
if (fd < 0) {
ret = -errno;
/* Map EROFS to EACCES so callers see a uniform permission error */
if (ret == -EROFS)
ret = -EACCES;
return ret;
}
s->fd = fd;
s->aligned_buf = NULL;
if ((bdrv_flags & BDRV_O_NOCACHE)) {
/* O_DIRECT needs aligned buffers: keep a bounce buffer around for
 * unaligned guest requests. NOTE(review): this assumes
 * qemu_blockalign() can return NULL with errno set in this tree —
 * confirm; newer QEMU aborts on allocation failure instead. */
s->aligned_buf = qemu_blockalign(bs, ALIGNED_BUFFER_SIZE);
if (s->aligned_buf == NULL) {
ret = -errno;
close(fd);
return ret;
}
}
return 0;
}
示例6: qed_write_table
/**
 * Write out an updated part or all of a table
 *
 * @s: QED state
 * @offset: Offset of table in image file, in bytes
 * @table: Table
 * @index: Index of first element
 * @n: Number of elements
 * @flush: Whether or not to sync to disk
 * @cb: Completion function
 * @opaque: Argument for completion function
 */
static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
unsigned int index, unsigned int n, bool flush,
BlockDriverCompletionFunc *cb, void *opaque)
{
QEDWriteTableCB *write_table_cb;
BlockDriverAIOCB *aiocb;
/* Elements per sector minus one; used to round the element range out to
 * whole sectors, as required for O_DIRECT I/O. */
unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
unsigned int start, end, i;
size_t len_bytes;
trace_qed_write_table(s, offset, table, index, n);
/* Calculate indices of the first and one after last elements */
start = index & ~sector_mask;
end = (index + n + sector_mask) & ~sector_mask;
len_bytes = (end - start) * sizeof(uint64_t);
write_table_cb = gencb_alloc(sizeof(*write_table_cb), cb, opaque);
write_table_cb->s = s;
write_table_cb->orig_table = table;
write_table_cb->flush = flush;
/* Bounce buffer so the little-endian on-disk copy can be built without
 * modifying the in-memory table. */
write_table_cb->table = qemu_blockalign(s->bs, len_bytes);
write_table_cb->iov.iov_base = write_table_cb->table->offsets;
write_table_cb->iov.iov_len = len_bytes;
qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);
/* Byteswap table */
for (i = start; i < end; i++) {
uint64_t le_offset = cpu_to_le64(table->offsets[i]);
write_table_cb->table->offsets[i - start] = le_offset;
}
/* Adjust for offset into table */
offset += start * sizeof(uint64_t);
aiocb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
&write_table_cb->qiov,
write_table_cb->qiov.size / BDRV_SECTOR_SIZE,
qed_write_table_cb, write_table_cb);
if (!aiocb) {
/* Submission failed: report -EIO through the normal completion
 * callback so cleanup happens in one place. */
qed_write_table_cb(write_table_cb, -EIO);
}
}
示例7: assert
/*
 * Obtain an NBDRequest for @client: reuse one from the export's free list
 * when available, otherwise allocate a fresh request with an aligned data
 * buffer. Takes a reference on the client and bumps its request count.
 */
static NBDRequest *nbd_request_get(NBDClient *client)
{
    NBDExport *exp = client->exp;
    NBDRequest *req;

    assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
    client->nb_requests++;

    if (!QSIMPLEQ_EMPTY(&exp->requests)) {
        /* Recycle a previously released request */
        req = QSIMPLEQ_FIRST(&exp->requests);
        QSIMPLEQ_REMOVE_HEAD(&exp->requests, entry);
    } else {
        req = g_malloc0(sizeof(NBDRequest));
        req->data = qemu_blockalign(exp->bs, NBD_BUFFER_SIZE);
    }

    nbd_client_get(client);
    req->client = client;
    return req;
}
示例8: blkverify_aio_get
/*
 * Issue a verified read: the same request is submitted to both the raw
 * image (bs->file) and the test image (s->test_file). The completion path
 * (acb->verify = blkverify_verify_readv) compares the two results.
 */
static BlockDriverAIOCB *blkverify_aio_readv(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque)
{
BDRVBlkverifyState *s = bs->opaque;
BlkverifyAIOCB *acb = blkverify_aio_get(bs, false, sector_num, qiov,
nb_sectors, cb, opaque);
acb->verify = blkverify_verify_readv;
/* Shadow buffer to hold the raw image's copy of the data */
acb->buf = qemu_blockalign(bs->file, qiov->size);
/* Clone the caller's iovec layout, but point it into acb->buf */
qemu_iovec_init(&acb->raw_qiov, acb->qiov->niov);
blkverify_iovec_clone(&acb->raw_qiov, qiov, acb->buf);
/* Test image fills the guest-visible qiov... */
bdrv_aio_readv(s->test_file, sector_num, qiov, nb_sectors,
blkverify_aio_cb, acb);
/* ...raw image fills the cloned qiov; the callback compares the two */
bdrv_aio_readv(bs->file, sector_num, &acb->raw_qiov, nb_sectors,
blkverify_aio_cb, acb);
return &acb->common;
}
示例9: compare_full_images
/*
 * Compare the full contents of the truth image and the test image (global
 * 'bs'), reading in chunks via the simulated AIO loop. Always returns 0;
 * setup failures terminate via die().
 */
static int compare_full_images (void)
{
CompareFullCB *cf;
int old_copy_on_read = FALSE;
printf ("Performing a full comparison of the truth image and "
"the test image...\n");
if (!strncmp (bs->drv->format_name, "fvd", 3)) {
/* Disable copy-on-read when scanning through the entire image. */
old_copy_on_read = fvd_get_copy_on_read (bs);
fvd_set_copy_on_read (bs, FALSE);
}
cf = g_malloc(sizeof(CompareFullCB));
/* Compare in chunks of up to 1 MiB (in 512-byte sectors) */
cf->max_nb_sectors = 1048576L / 512;
cf->nb_sectors = MIN (cf->max_nb_sectors, total_sectors);
if (posix_memalign ((void **) &cf->truth_buf, 512,
cf->max_nb_sectors * 512) != 0) {
die ("posix_memalign");
}
/* Aligned buffer for reading from the test image */
cf->iov.iov_base = qemu_blockalign (bs, cf->max_nb_sectors * 512);
cf->iov.iov_len = cf->nb_sectors * 512;
cf->sector_num = 0;
qemu_iovec_init_external (&cf->qiov, &cf->iov, 1);
/* Kick off the first read; compare_full_images_cb presumably advances
 * sector_num and frees cf when the scan completes — TODO confirm in
 * the callback. */
if (!bdrv_aio_readv (bs, cf->sector_num, &cf->qiov,
cf->nb_sectors, compare_full_images_cb, cf)) {
die ("bdrv_aio_readv\n");
}
/* Run the simulated AIO event loop until all queued tasks finish */
sim_all_tasks ();
if (!strncmp (bs->drv->format_name, "fvd", 3)) {
/* Restore the original copy-on-read setting */
fvd_set_copy_on_read (bs, old_copy_on_read);
}
return 0;
}
示例10: qemu_blockalign
/*
 * Allocate a buffer large enough for one QED table
 * (cluster_size * table_size bytes), aligned for O_DIRECT access.
 */
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    size_t table_bytes = s->header.cluster_size * s->header.table_size;

    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs, table_bytes);
}
示例11: main
//.........这里部分代码省略.........
ret = nbd_receive_negotiate(sock, NULL, &nbdflags,
&size, &blocksize);
if (ret == -1) {
ret = 1;
goto out;
}
ret = nbd_init(fd, sock, size, blocksize);
if (ret == -1) {
ret = 1;
goto out;
}
printf("NBD device %s is now connected to file %s\n",
device, argv[optind]);
/* update partition table */
show_parts(device);
ret = nbd_client(fd);
if (ret) {
ret = 1;
}
close(fd);
out:
kill(pid, SIGTERM);
unlink(socket);
return ret;
}
/* children */
}
sharing_fds = qemu_malloc((shared + 1) * sizeof(int));
if (socket) {
sharing_fds[0] = unix_socket_incoming(socket);
} else {
sharing_fds[0] = tcp_socket_incoming(bindto, port);
}
if (sharing_fds[0] == -1)
return 1;
max_fd = sharing_fds[0];
nb_fds++;
data = qemu_blockalign(bs, NBD_BUFFER_SIZE);
if (data == NULL)
errx(EXIT_FAILURE, "Cannot allocate data buffer");
do {
FD_ZERO(&fds);
for (i = 0; i < nb_fds; i++)
FD_SET(sharing_fds[i], &fds);
ret = select(max_fd + 1, &fds, NULL, NULL, NULL);
if (ret == -1)
break;
if (FD_ISSET(sharing_fds[0], &fds))
ret--;
for (i = 1; i < nb_fds && ret; i++) {
if (FD_ISSET(sharing_fds[i], &fds)) {
if (nbd_trip(bs, sharing_fds[i], fd_size, dev_offset,
&offset, readonly, data, NBD_BUFFER_SIZE) != 0) {
close(sharing_fds[i]);
nb_fds--;
sharing_fds[i] = sharing_fds[nb_fds];
i--;
}
ret--;
}
}
/* new connection ? */
if (FD_ISSET(sharing_fds[0], &fds)) {
if (nb_fds < shared + 1) {
sharing_fds[nb_fds] = accept(sharing_fds[0],
(struct sockaddr *)&addr,
&addr_len);
if (sharing_fds[nb_fds] != -1 &&
nbd_negotiate(sharing_fds[nb_fds], fd_size) != -1) {
if (sharing_fds[nb_fds] > max_fd)
max_fd = sharing_fds[nb_fds];
nb_fds++;
}
}
}
} while (persistent || nb_fds > 1);
qemu_vfree(data);
close(sharing_fds[0]);
bdrv_close(bs);
qemu_free(sharing_fds);
if (socket)
unlink(socket);
return 0;
}
示例12: commit_run
static void coroutine_fn commit_run(void *opaque)
{
CommitBlockJob *s = opaque;
BlockDriverState *active = s->active;
BlockDriverState *top = s->top;
BlockDriverState *base = s->base;
BlockDriverState *overlay_bs = NULL;
int64_t sector_num, end;
int ret = 0;
int n = 0;
void *buf;
int bytes_written = 0;
int64_t base_len;
ret = s->common.len = bdrv_getlength(top);
if (s->common.len < 0) {
goto exit_restore_reopen;
}
ret = base_len = bdrv_getlength(base);
if (base_len < 0) {
goto exit_restore_reopen;
}
if (base_len < s->common.len) {
ret = bdrv_truncate(base, s->common.len);
if (ret) {
goto exit_restore_reopen;
}
}
overlay_bs = bdrv_find_overlay(active, top);
end = s->common.len >> BDRV_SECTOR_BITS;
buf = qemu_blockalign(top, COMMIT_BUFFER_SIZE);
for (sector_num = 0; sector_num < end; sector_num += n) {
uint64_t delay_ms = 0;
bool copy;
wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that qemu_aio_flush() returns.
*/
block_job_sleep(&s->common, rt_clock, delay_ms);
if (block_job_is_cancelled(&s->common)) {
break;
}
/* Copy if allocated above the base */
ret = bdrv_co_is_allocated_above(top, base, sector_num,
COMMIT_BUFFER_SIZE / BDRV_SECTOR_SIZE,
&n);
copy = (ret == 1);
trace_commit_one_iteration(s, sector_num, n, ret);
if (copy) {
if (s->common.speed) {
delay_ms = ratelimit_calculate_delay(&s->limit, n);
if (delay_ms > 0) {
goto wait;
}
}
ret = commit_populate(top, base, sector_num, n, buf);
bytes_written += n * BDRV_SECTOR_SIZE;
}
if (ret < 0) {
if (s->on_error == BLOCK_ERR_STOP_ANY ||
s->on_error == BLOCK_ERR_REPORT ||
(s->on_error == BLOCK_ERR_STOP_ENOSPC && ret == -ENOSPC)) {
goto exit_free_buf;
} else {
n = 0;
continue;
}
}
/* Publish progress */
s->common.offset += n * BDRV_SECTOR_SIZE;
}
ret = 0;
if (!block_job_is_cancelled(&s->common) && sector_num == end) {
/* success */
ret = bdrv_drop_intermediate(active, top, base);
}
exit_free_buf:
qemu_vfree(buf);
exit_restore_reopen:
/* restore base open flags here if appropriate (e.g., change the base back
* to r/o). These reopens do not need to be atomic, since we won't abort
* even on failure here */
if (s->base_flags != bdrv_get_flags(base)) {
bdrv_reopen(base, s->base_flags, NULL);
}
if (s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
}
//.........这里部分代码省略.........
示例13: stream_run
static int coroutine_fn stream_run(Job *job, Error **errp)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockBackend *blk = s->common.blk;
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *base = s->base;
int64_t len;
int64_t offset = 0;
uint64_t delay_ns = 0;
int error = 0;
int ret = 0;
int64_t n = 0; /* bytes */
void *buf;
if (!bs->backing) {
goto out;
}
len = bdrv_getlength(bs);
if (len < 0) {
ret = len;
goto out;
}
job_progress_set_remaining(&s->common.job, len);
buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);
/* Turn on copy-on-read for the whole block device so that guest read
* requests help us make progress. Only do this when copying the entire
* backing chain since the copy-on-read operation does not take base into
* account.
*/
if (!base) {
bdrv_enable_copy_on_read(bs);
}
for ( ; offset < len; offset += n) {
bool copy;
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
job_sleep_ns(&s->common.job, delay_ns);
if (job_is_cancelled(&s->common.job)) {
break;
}
copy = false;
ret = bdrv_is_allocated(bs, offset, STREAM_BUFFER_SIZE, &n);
if (ret == 1) {
/* Allocated in the top, no need to copy. */
} else if (ret >= 0) {
/* Copy if allocated in the intermediate images. Limit to the
* known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE). */
ret = bdrv_is_allocated_above(backing_bs(bs), base,
offset, n, &n);
/* Finish early if end of backing file has been reached */
if (ret == 0 && n == 0) {
n = len - offset;
}
copy = (ret == 1);
}
trace_stream_one_iteration(s, offset, n, ret);
if (copy) {
ret = stream_populate(blk, offset, n, buf);
}
if (ret < 0) {
BlockErrorAction action =
block_job_error_action(&s->common, s->on_error, true, -ret);
if (action == BLOCK_ERROR_ACTION_STOP) {
n = 0;
continue;
}
if (error == 0) {
error = ret;
}
if (action == BLOCK_ERROR_ACTION_REPORT) {
break;
}
}
ret = 0;
/* Publish progress */
job_progress_update(&s->common.job, n);
if (copy) {
delay_ns = block_job_ratelimit_get_delay(&s->common, n);
} else {
delay_ns = 0;
}
}
if (!base) {
bdrv_disable_copy_on_read(bs);
}
/* Do not remove the backing file if an error was there but ignored. */
ret = error;
//.........这里部分代码省略.........
示例14: mirror_run
static void coroutine_fn mirror_run(void *opaque)
{
MirrorBlockJob *s = opaque;
BlockDriverState *bs = s->common.bs;
int64_t sector_num, end, sectors_per_chunk, length;
uint64_t last_pause_ns;
BlockDriverInfo bdi;
char backing_filename[1024];
int ret = 0;
int n;
if (block_job_is_cancelled(&s->common)) {
goto immediate_exit;
}
s->common.len = bdrv_getlength(bs);
if (s->common.len <= 0) {
block_job_completed(&s->common, s->common.len);
return;
}
length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
s->in_flight_bitmap = bitmap_new(length);
/* If we have no backing file yet in the destination, we cannot let
* the destination do COW. Instead, we copy sectors around the
* dirty data if needed. We need a bitmap to do that.
*/
bdrv_get_backing_filename(s->target, backing_filename,
sizeof(backing_filename));
if (backing_filename[0] && !s->target->backing_hd) {
bdrv_get_info(s->target, &bdi);
if (s->granularity < bdi.cluster_size) {
s->buf_size = MAX(s->buf_size, bdi.cluster_size);
s->cow_bitmap = bitmap_new(length);
}
}
end = s->common.len >> BDRV_SECTOR_BITS;
s->buf = qemu_blockalign(bs, s->buf_size);
sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
mirror_free_init(s);
if (s->mode != MIRROR_SYNC_MODE_NONE) {
/* First part, loop on the sectors and initialize the dirty bitmap. */
BlockDriverState *base;
base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
for (sector_num = 0; sector_num < end; ) {
int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
ret = bdrv_is_allocated_above(bs, base,
sector_num, next - sector_num, &n);
if (ret < 0) {
goto immediate_exit;
}
assert(n > 0);
if (ret == 1) {
bdrv_set_dirty(bs, sector_num, n);
sector_num = next;
} else {
sector_num += n;
}
}
}
bdrv_dirty_iter_init(bs, &s->hbi);
last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
for (;;) {
uint64_t delay_ns;
int64_t cnt;
bool should_complete;
if (s->ret < 0) {
ret = s->ret;
goto immediate_exit;
}
cnt = bdrv_get_dirty_count(bs);
/* Note that even when no rate limit is applied we need to yield
* periodically with no pending I/O so that qemu_aio_flush() returns.
* We do so every SLICE_TIME nanoseconds, or when there is an error,
* or when the source is clean, whichever comes first.
*/
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
(cnt == 0 && s->in_flight > 0)) {
trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
qemu_coroutine_yield();
continue;
} else if (cnt != 0) {
mirror_iteration(s);
continue;
}
}
should_complete = false;
if (s->in_flight == 0 && cnt == 0) {
//.........这里部分代码省略.........
示例15: vhdx_log_flush_desc
/* Flushes the descriptor described by desc to the VHDX image file.
 * If the descriptor is a data descriptor, than 'data' must be non-NULL,
 * and >= 4096 bytes (VHDX_LOG_SECTOR_SIZE), containing the data to be
 * written.
 *
 * Verification is performed to make sure the sequence numbers of a data
 * descriptor match the sequence number in the desc.
 *
 * For a zero descriptor, it may describe multiple sectors to fill with zeroes.
 * In this case, it should be noted that zeroes are written to disk, and the
 * image file is not extended as a sparse file. */
static int vhdx_log_flush_desc(BlockDriverState *bs, VHDXLogDescriptor *desc,
VHDXLogDataSector *data)
{
int ret = 0;
uint64_t seq, file_offset;
uint32_t offset = 0;
void *buffer = NULL;
uint64_t count = 1;
int i;
/* One sector's worth of payload, aligned for O_DIRECT */
buffer = qemu_blockalign(bs, VHDX_LOG_SECTOR_SIZE);
if (!memcmp(&desc->signature, "desc", 4)) {
/* data sector */
if (data == NULL) {
ret = -EFAULT;
goto exit;
}
/* The sequence number of the data sector must match that
 * in the descriptor */
seq = data->sequence_high;
seq <<= 32;
seq |= data->sequence_low & 0xffffffff;
if (seq != desc->sequence_number) {
ret = -EINVAL;
goto exit;
}
/* Each data sector is in total 4096 bytes, however the first
 * 8 bytes, and last 4 bytes, are located in the descriptor */
memcpy(buffer, &desc->leading_bytes, 8);
offset += 8;
memcpy(buffer+offset, data->data, 4084);
offset += 4084;
memcpy(buffer+offset, &desc->trailing_bytes, 4);
} else if (!memcmp(&desc->signature, "zero", 4)) {
/* write 'count' sectors of sector */
memset(buffer, 0, VHDX_LOG_SECTOR_SIZE);
count = desc->zero_length / VHDX_LOG_SECTOR_SIZE;
}
/* NOTE(review): if the signature is neither "desc" nor "zero", the
 * uninitialized buffer would be written below — presumably the caller
 * has already validated the descriptor signature; confirm. */
file_offset = desc->file_offset;
/* count is only > 1 if we are writing zeroes */
for (i = 0; i < count; i++) {
ret = bdrv_pwrite_sync(bs->file, file_offset, buffer,
VHDX_LOG_SECTOR_SIZE);
if (ret < 0) {
goto exit;
}
file_offset += VHDX_LOG_SECTOR_SIZE;
}
exit:
qemu_vfree(buffer);
return ret;
}