Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block/vdi.c | 72 +++--
1 file changed, 41 insertions(+), 31 deletions(-)
diff --git a/block/vdi.c b/block/vdi.c
index 8295511..e5fe4e8 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -611,53 +611,55 @@ vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
return ret;
}
-static int vdi_co_write(BlockDriverState *bs,
-int64_t sector_num, const uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVdiState *s = bs->opaque;
+QEMUIOVector local_qiov;
uint32_t bmap_entry;
uint32_t block_index;
-uint32_t sector_in_block;
-uint32_t n_sectors;
+uint32_t offset_in_block;
+uint32_t n_bytes;
uint32_t bmap_first = VDI_UNALLOCATED;
uint32_t bmap_last = VDI_UNALLOCATED;
uint8_t *block = NULL;
+uint64_t bytes_done = 0;
int ret = 0;
logout("\n");
-while (ret >= 0 && nb_sectors > 0) {
-block_index = sector_num / s->block_sectors;
-sector_in_block = sector_num % s->block_sectors;
-n_sectors = s->block_sectors - sector_in_block;
-if (n_sectors > nb_sectors) {
-n_sectors = nb_sectors;
-}
+qemu_iovec_init(&local_qiov, qiov->niov);
-logout("will write %u sectors starting at sector %" PRIu64 "\n",
- n_sectors, sector_num);
+while (ret >= 0 && bytes > 0) {
+block_index = offset / s->block_size;
+offset_in_block = offset % s->block_size;
+n_bytes = MIN(bytes, s->block_size - offset_in_block);
+
+logout("will write %u bytes starting at offset %" PRIu64 "\n",
+ n_bytes, offset);
/* prepare next AIO request */
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Allocate new block and write to it. */
-uint64_t offset;
+uint64_t data_offset;
bmap_entry = s->header.blocks_allocated;
s->bmap[block_index] = cpu_to_le32(bmap_entry);
s->header.blocks_allocated++;
-offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors;
+data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size;
if (block == NULL) {
block = g_malloc(s->block_size);
bmap_first = block_index;
}
bmap_last = block_index;
/* Copy data to be written to new block and zero unused parts. */
-memset(block, 0, sector_in_block * SECTOR_SIZE);
-memcpy(block + sector_in_block * SECTOR_SIZE,
- buf, n_sectors * SECTOR_SIZE);
-memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
- (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
+memset(block, 0, offset_in_block);
+qemu_iovec_to_buf(qiov, bytes_done, block + offset_in_block,
+ n_bytes);
+memset(block + offset_in_block + n_bytes, 0,
+ s->block_size - n_bytes - offset_in_block);
/* Note that this coroutine does not yield anywhere from reading the
 * bmap entry until here, so in regards to all the coroutines trying
@@ -667,12 +669,12 @@ static int vdi_co_write(BlockDriverState *bs,
* acquire the lock and thus the padded cluster is written before
* the other coroutines can write to the affected area. */
qemu_co_mutex_lock(&s->write_lock);
-ret = bdrv_write(bs->file->bs, offset, block, s->block_sectors);
+ret = bdrv_pwrite(bs->file->bs, data_offset, block, s->block_size);
qemu_co_mutex_unlock(&s->write_lock);
} else {
-uint64_t offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors +
- sector_in_block;
+uint64_t data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size +
+ offset_in_block;
qemu_co_mutex_lock(&s->write_lock);
/* This lock is only used to make sure the following write operation
* is executed after the write issued by the coroutine allocating
@@ -683,16 +685,23 @@ static int vdi_co_write(BlockDriverState *bs,
* that that write operation has returned (there may be other writes
* in flight, but they do not concern this very operation). */
qemu_co_mutex_unlock(&s->write_lock);
-ret = bdrv_write(bs->file->bs, offset, buf, n_sectors);
+
+