Hi,

On 05/06/15 15:49, Bob Peterson wrote:
Hi,

This patch allows the block allocation code to retain the buffers
for the resource groups so they don't need to be re-read from buffer
cache with every request. This is a performance improvement that's
especially noticeable when resource groups are very large. For
example, with 2GB resource groups and 4K blocks, there can be 33
blocks for every resource group. This patch allows those 33 buffers
to be kept around and not read in and thrown away with every
operation. The buffers are released when the resource group is
either synced or invalidated.
The blocks should be cached between operations, so this should only skip the lookup of the cached block, with no change to the actual I/O. Does that mean that grab_cache_page() is slow, I wonder? Or is this an issue of going around the retry loop due to a lack of memory at some stage?

How does this interact with the rgrplvb support? I'd guess that with that turned on, this is no longer an issue, because we'd only read in the blocks for the rgrps that we are actually going to use?



Steve.

Signed-off-by: Bob Peterson <rpete...@redhat.com>
---
  fs/gfs2/glops.c | 14 +++++++++++---
  fs/gfs2/rgrp.c  | 23 +++++++++++++++++++----
  fs/gfs2/rgrp.h  |  1 +
  3 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fe91951..c23377f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -144,6 +144,12 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
        struct gfs2_rgrpd *rgd;
        int error;
+       spin_lock(&gl->gl_spin);
+       rgd = gl->gl_object;
+       if (rgd)
+               gfs2_rgrp_brelse(rgd);
+       spin_unlock(&gl->gl_spin);
+
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
@@ -175,15 +181,17 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
  {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
+       struct gfs2_rgrpd *rgd = gl->gl_object;
+
+       if (rgd)
+               gfs2_rgrp_brelse(rgd);
        WARN_ON_ONCE(!(flags & DIO_METADATA));
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
-       if (gl->gl_object) {
-               struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
+       if (rgd)
                rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
-       }
  }
/**
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index cd53d6e..c6c6232 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1244,14 +1244,13 @@ int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
  }
/**
- * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
- * @gh: The glock holder for the resource group
+ * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
+ * @rgd: The resource group
   *
   */
-void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
+void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
  {
-       struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
        int x, length = rgd->rd_length;
        for (x = 0; x < length; x++) {
@@ -1264,6 +1263,22 @@ void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
 }
 
+/**
+ * gfs2_rgrp_go_unlock - Unlock a rgrp glock
+ * @gh: The glock holder for the resource group
+ *
+ */
+
+void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
+{
+       struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
+       int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
+               test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
+
+       if (rgd && demote_requested)
+               gfs2_rgrp_brelse(rgd);
+}
+
  int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
                             struct buffer_head *bh,
                             const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 68972ec..c0ab33f 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -36,6 +36,7 @@ extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
  extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
  extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
  extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
+extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
  extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);

Reply via email to