On 18.05.2016 at 09:41, Fam Zheng wrote:
On Tue, 05/17 15:59, Peter Lieven wrote:
until now the allocation map was used only as a hint whether a cluster
is allocated or not. If a block was not allocated (or QEMU had
no info about the allocation status), a get_block_status call was
issued to check the allocation status and possibly avoid
a subsequent read of unallocated sectors. If a block was known to be
allocated, the get_block_status call was omitted. In the other case
a get_block_status call was issued before every read to avoid
the need for a consistent allocation map. To avoid the
potential overhead of calling get_block_status for each and
every read request, this was only done for bigger requests.

This patch enhances this mechanism to cache the allocation
status and avoid calling get_block_status for blocks whose
allocation status has been queried before. This allows
bypassing the read request even for smaller requests and
additionally omits calling get_block_status for blocks known to be
unallocated.
Looks good overall, I have some minor comments below, though.
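
Condensed, the new read-path decision is roughly the following (illustrative
sketch only, heavily simplified; the names below are made up for this sketch
and are not from the patch):

#include <stdbool.h>

/* Illustrative only: the three possible outcomes of the new read path. */
enum read_action { RETURN_ZEROS, CHECK_BLOCK_STATUS, READ_FROM_TARGET };

static enum read_action decide_read_action(bool cached_valid,
                                            bool cached_allocated,
                                            bool large_request)
{
    if (cached_valid && !cached_allocated) {
        /* known unallocated on an lbprz target: return zeros locally */
        return RETURN_ZEROS;
    }
    if (large_request && !cached_valid && !cached_allocated) {
        /* no cached info and the request is big enough: issue one
         * get_block_status call and cache the result */
        return CHECK_BLOCK_STATUS;
    }
    /* known allocated, or too small to be worth a status lookup */
    return READ_FROM_TARGET;
}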

Signed-off-by: Peter Lieven <p...@kamp.de>
---
  block/iscsi.c | 194 +++++++++++++++++++++++++++++++++++++++++++---------------
  1 file changed, 144 insertions(+), 50 deletions(-)

diff --git a/block/iscsi.c b/block/iscsi.c
index 10f3906..b25bebd 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -2,7 +2,7 @@
   * QEMU Block driver for iSCSI images
   *
   * Copyright (c) 2010-2011 Ronnie Sahlberg <ronniesahlb...@gmail.com>
- * Copyright (c) 2012-2015 Peter Lieven <p...@kamp.de>
+ * Copyright (c) 2012-2016 Peter Lieven <p...@kamp.de>
   *
   * Permission is hereby granted, free of charge, to any person obtaining a copy
   * of this software and associated documentation files (the "Software"), to deal
@@ -62,7 +62,9 @@ typedef struct IscsiLun {
      struct scsi_inquiry_logical_block_provisioning lbp;
      struct scsi_inquiry_block_limits bl;
      unsigned char *zeroblock;
-    unsigned long *allocationmap;
+    unsigned long *allocmap;
+    unsigned long *allocmap_valid;
+    long allocmap_size;
Could you add some comments about the relation between these three fields and
their respective semantics?

Sure.

Are you happy with this comment?

/* The allocation map tracks which clusters (pages) on the iSCSI target are
 * allocated and which are not. In case a target returns zeros for unallocated
 * pages (iscsilun->lbprz) we can directly return zeros instead of reading zeros
 * over the wire if a read request falls within an unallocated block. As there
 * are 3 possible states we need 2 bitmaps to track them. allocmap_valid keeps
 * track whether QEMU's information about a page is valid. allocmap tracks
 * whether a page is allocated or not. In case QEMU has no valid information
 * about a page, the corresponding allocmap entry should be switched to
 * unallocated as well to force a new lookup of the allocation status, as
 * lookups are generally skipped if a page is suspected to be allocated. If an
 * iSCSI target is opened with cache.direct = on, the allocmap_valid does not
 * exist, turning all cached information invalid so that a fresh lookup is made
 * for any page even if the allocmap entry indicates it is unallocated. */
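
Maybe it also helps to spell out the encoding; the helper below is purely
illustrative (not part of the patch, the function is made up) and just shows
how the two bitmaps combine into the three states described above:

/* Illustrative only: how allocmap and allocmap_valid encode the three
 * per-cluster states (made-up helper, not part of the patch). */
enum cluster_state { CL_UNKNOWN, CL_ALLOCATED, CL_UNALLOCATED };

static enum cluster_state cluster_state(IscsiLun *iscsilun, long cluster)
{
    if (iscsilun->allocmap_valid == NULL ||
        !test_bit(cluster, iscsilun->allocmap_valid)) {
        /* no (valid) cached information: the allocmap bit is kept clear
         * as well, so a get_block_status lookup will not be skipped */
        return CL_UNKNOWN;
    }
    return test_bit(cluster, iscsilun->allocmap) ? CL_ALLOCATED
                                                 : CL_UNALLOCATED;
}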


      int cluster_sectors;
      bool use_16_for_rw;
      bool write_protected;
@@ -415,39 +417,127 @@ static bool is_request_lun_aligned(int64_t sector_num, int nb_sectors,
      return 1;
  }
-static unsigned long *iscsi_allocationmap_init(IscsiLun *iscsilun)
+static void iscsi_allocmap_free(IscsiLun *iscsilun)
  {
-    return bitmap_try_new(DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks,
-                                                       iscsilun),
-                                       iscsilun->cluster_sectors));
+    g_free(iscsilun->allocmap);
+    g_free(iscsilun->allocmap_valid);
+    iscsilun->allocmap = NULL;
+    iscsilun->allocmap_valid = NULL;
  }
-static void iscsi_allocationmap_set(IscsiLun *iscsilun, int64_t sector_num,
-                                    int nb_sectors)
+
+static int iscsi_allocmap_init(IscsiLun *iscsilun, int open_flags)
  {
-    if (iscsilun->allocationmap == NULL) {
-        return;
+    iscsilun->allocmap_size =
+        DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks, iscsilun),
+                     iscsilun->cluster_sectors);
+
+    iscsilun->allocmap = bitmap_try_new(iscsilun->allocmap_size);
+    if (!iscsilun->allocmap) {
+        return -ENOMEM;
+    }
+
+    if (open_flags & BDRV_O_NOCACHE) {
+        return 0;
+    }
+
+    iscsilun->allocmap_valid = bitmap_try_new(iscsilun->allocmap_size);
+    if (!iscsilun->allocmap_valid) {
Do you want to free iscsilun->allocmap? If you keep the two maps together...

It does still work without allocmap_valid, but it might be a good idea
to free the allocmap as well if we are under memory pressure.
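
Something like the following, i.e. falling back to running without any
allocation map at all if the second bitmap cannot be allocated (untested
sketch on top of the hunk above):

    iscsilun->allocmap_valid = bitmap_try_new(iscsilun->allocmap_size);
    if (!iscsilun->allocmap_valid) {
        /* under memory pressure free the allocmap as well instead of
         * keeping it and running in the old allocmap-only mode */
        iscsi_allocmap_free(iscsilun);
        return -ENOMEM;
    }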


+        return -ENOMEM;
      }
-    bitmap_set(iscsilun->allocationmap,
-               sector_num / iscsilun->cluster_sectors,
-               DIV_ROUND_UP(nb_sectors, iscsilun->cluster_sectors));
+
+    return 0;
  }
-static void iscsi_allocationmap_clear(IscsiLun *iscsilun, int64_t sector_num,
-                                      int nb_sectors)
+static void
+iscsi_allocmap_update(IscsiLun *iscsilun, int64_t sector_num,
+                           int nb_sectors, bool allocated, bool valid)
                               ^
Looks like the parameter list is not aligned.

  {
      int64_t cluster_num, nb_clusters;
-    if (iscsilun->allocationmap == NULL) {
+
+    if (iscsilun->allocmap == NULL) {
          return;
      }
      cluster_num = DIV_ROUND_UP(sector_num, iscsilun->cluster_sectors);
      nb_clusters = (sector_num + nb_sectors) / iscsilun->cluster_sectors
                    - cluster_num;
-    if (nb_clusters > 0) {
-        bitmap_clear(iscsilun->allocationmap, cluster_num, nb_clusters);
+    if (allocated) {
+        bitmap_set(iscsilun->allocmap,
+                   sector_num / iscsilun->cluster_sectors,
+                   DIV_ROUND_UP(nb_sectors, iscsilun->cluster_sectors));
+    } else if (nb_clusters > 0) {
+        bitmap_clear(iscsilun->allocmap, cluster_num, nb_clusters);
+    }
+
+    if (iscsilun->allocmap_valid == NULL) {
+        return;
+    }
... there will be one less branch and code is simpler.

I have to keep this one. It's valid to have no allocmap_valid bitmap. That's
the case when cache.direct is on. In this case the map is always invalid, which
reverts to the old allocmap behaviour.


+    if (valid) {
+        if (nb_clusters > 0) {
+            bitmap_set(iscsilun->allocmap_valid, cluster_num, nb_clusters);
+        }
+    } else {
+        bitmap_clear(iscsilun->allocmap_valid,
+                     sector_num / iscsilun->cluster_sectors,
+                     DIV_ROUND_UP(nb_sectors, iscsilun->cluster_sectors));
      }
  }
+static void
+iscsi_allocmap_set_allocated(IscsiLun *iscsilun, int64_t sector_num,
+                                  int nb_sectors)
+{
+    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, true, true);
+}
+
+static void
+iscsi_allocmap_set_unallocated(IscsiLun *iscsilun, int64_t sector_num,
+                                    int nb_sectors)
+{
+    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, true);
+}
+
+static void iscsi_allocmap_set_invalid(IscsiLun *iscsilun, int64_t sector_num,
+                                            int nb_sectors)
+{
+    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, false);
+}
+
+static void iscsi_allocmap_invalidate(IscsiLun *iscsilun)
+{
+    if (iscsilun->allocmap) {
+        bitmap_zero(iscsilun->allocmap, iscsilun->allocmap_size);
+    }
+    if (iscsilun->allocmap_valid) {
+        bitmap_zero(iscsilun->allocmap_valid, iscsilun->allocmap_size);
+    }
+}
+
+static inline bool
+iscsi_allocmap_is_allocated(IscsiLun *iscsilun, int64_t sector_num,
+                            int nb_sectors)
+{
+    unsigned long size;
+    if (iscsilun->allocmap == NULL) {
+        return true;
+    }
+    size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
+    return !(find_next_bit(iscsilun->allocmap, size,
+                           sector_num / iscsilun->cluster_sectors) == size);
+}
+
+static inline bool iscsi_allocmap_is_valid(IscsiLun *iscsilun,
+                                           int64_t sector_num, int nb_sectors)
+{
+    unsigned long size;
+    if (iscsilun->allocmap_valid == NULL) {
+        return false;
+    }
+    size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
+    return (find_next_zero_bit(iscsilun->allocmap_valid, size,
+                               sector_num / iscsilun->cluster_sectors) == size);
+}
+
  static int coroutine_fn
  iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                        QEMUIOVector *iov, int flags)
@@ -507,26 +597,16 @@ retry:
      }
if (iTask.status != SCSI_STATUS_GOOD) {
+        iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
          return iTask.err_code;
      }
- iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors);
+    iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
return 0;
  }
-static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun,
-                                             int64_t sector_num, int nb_sectors)
-{
-    unsigned long size;
-    if (iscsilun->allocationmap == NULL) {
-        return true;
-    }
-    size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors);
-    return !(find_next_bit(iscsilun->allocationmap, size,
-                           sector_num / iscsilun->cluster_sectors) == size);
-}
static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
                                                    int64_t sector_num,
@@ -611,9 +691,9 @@ retry:
      }
if (ret & BDRV_BLOCK_ZERO) {
-        iscsi_allocationmap_clear(iscsilun, sector_num, *pnum);
+        iscsi_allocmap_set_unallocated(iscsilun, sector_num, *pnum);
      } else {
-        iscsi_allocationmap_set(iscsilun, sector_num, *pnum);
+        iscsi_allocmap_set_allocated(iscsilun, sector_num, *pnum);
      }
if (*pnum > nb_sectors) {
@@ -629,7 +709,7 @@ out:
      return ret;
  }
-static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
+ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
A superfluous leading whitespace?

                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
  {
@@ -648,16 +728,25 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
          return -EINVAL;
      }
- if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
-        !iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
-        int64_t ret;
+    if (iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
+        !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
+            qemu_iovec_memset(iov, 0, 0x00, iov->size);
+            return 0;
+    }
+
+    if (nb_sectors >= ISCSI_CHECKALLOC_THRES &&
+        !iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
+        !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
          int pnum;
          BlockDriverState *file;
-        ret = iscsi_co_get_block_status(bs, sector_num, INT_MAX, &pnum, &file);
+        int64_t ret = iscsi_co_get_block_status(bs,
+                          sector_num - sector_num % iscsilun->cluster_sectors,
+                          INT_MAX, &pnum, &file);
          if (ret < 0) {
              return ret;
          }
-        if (ret & BDRV_BLOCK_ZERO && pnum >= nb_sectors) {
+        if (ret & BDRV_BLOCK_ZERO &&
+            pnum >= nb_sectors + sector_num % iscsilun->cluster_sectors) {
              qemu_iovec_memset(iov, 0, 0x00, iov->size);
              return 0;
          }
@@ -892,7 +981,6 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
      }
iscsi_set_events(iscsilun);
-
Superfluous whitespace change.

      return &acb->common;
  }
@@ -963,7 +1051,7 @@ retry:
          return iTask.err_code;
      }
- iscsi_allocationmap_clear(iscsilun, sector_num, nb_sectors);
+    iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
return 0;
  }
@@ -1053,13 +1141,14 @@ retry:
      }
if (iTask.status != SCSI_STATUS_GOOD) {
+        iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
          return iTask.err_code;
      }
if (flags & BDRV_REQ_MAY_UNMAP) {
-        iscsi_allocationmap_clear(iscsilun, sector_num, nb_sectors);
+        iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
      } else {
-        iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors);
+        iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
      }
return 0;
@@ -1633,10 +1722,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
          iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
                                       iscsilun->block_size) >> BDRV_SECTOR_BITS;
          if (iscsilun->lbprz) {
-            iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
-            if (iscsilun->allocationmap == NULL) {
-                ret = -ENOMEM;
-            }
+            ret = iscsi_allocmap_init(iscsilun, bs->open_flags);
          }
      }
@@ -1673,7 +1759,7 @@ static void iscsi_close(BlockDriverState *bs)
      }
      iscsi_destroy_context(iscsi);
      g_free(iscsilun->zeroblock);
-    g_free(iscsilun->allocationmap);
+    iscsi_allocmap_free(iscsilun);
      memset(iscsilun, 0, sizeof(IscsiLun));
  }
@@ -1750,9 +1836,9 @@ static int iscsi_truncate(BlockDriverState *bs, int64_t offset)
          return -EINVAL;
      }
- if (iscsilun->allocationmap != NULL) {
-        g_free(iscsilun->allocationmap);
-        iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
+    if (iscsilun->allocmap != NULL) {
+        iscsi_allocmap_free(iscsilun);
+        iscsi_allocmap_init(iscsilun, bs->open_flags);
Need to handle the return value here?

I think when originally implementing this we decided that a truncate should not fail
if the allocmap can't be resized. The feature is just disabled in this case.

Thinking about Jeff's comment on my NFS patch, I also need to free allocmap_valid
if we change from cache.direct = off -> on.
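
Roughly along these lines (untested sketch, assuming it hooks into
.bdrv_reopen_commit; whether that is the right place still needs checking):

static void iscsi_reopen_commit(BDRVReopenState *reopen_state)
{
    IscsiLun *iscsilun = reopen_state->bs->opaque;

    /* cache.direct may have been switched on by the reopen; in that case
     * drop the valid map so all cached allocation info is treated as stale */
    if ((reopen_state->flags & BDRV_O_NOCACHE) &&
        iscsilun->allocmap_valid != NULL) {
        g_free(iscsilun->allocmap_valid);
        iscsilun->allocmap_valid = NULL;
    }
}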

Thanks for reviewing,
Peter
