Restore an optimization removed in commit 10201655b0 "Fix debugfs glocks
dump": keep the glock hash table iterator active while the glock dump
file is held open.  This avoids having to rescan the hash table from the
start if the user-space buffer the kernel is writing to is sufficiently
small.

This is a partial fix only: if the user-space buffer is roughly as big
as the kernel buffer, the kernel will eventually end up at an entry that
exceeds the kernel-internal buffer (gfs2_glock_seq_show).  The output up
to but not including that entry will be copied to user-space.  Upon the next
sequential read from user-space, gfs2_glock_seq_start will seek to that
partial entry at the current position, and will continue from there.  At
the moment, this requires scanning the hash table from the start.

Signed-off-by: Andreas Gruenbacher <[email protected]>
---
 fs/gfs2/glock.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 11066d8647d2..93c7cd2325b4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1944,18 +1944,29 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
 {
        struct gfs2_glock_iter *gi = seq->private;
-       loff_t n = *pos;
+       loff_t n;
+
+       /*
+        * From the current position, we can either skip to the next hash table
+        * entry or start from the beginning.
+        */
+       if (*pos <= gi->last_pos) {
+               rhashtable_walk_exit(&gi->hti);
+               rhashtable_walk_enter(&gl_hash_table, &gi->hti);
+               n = *pos + 1;
+       } else
+               n = *pos - gi->last_pos;
 
-       rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        if (rhashtable_walk_start(&gi->hti) != 0)
                return NULL;
 
-       do {
+       while (n--) {
                gfs2_glock_iter_next(gi);
-       } while (gi->gl && n--);
+               if (!gi->gl)
+                       break;
+       }
 
        gi->last_pos = *pos;
-
        return gi->gl;
 }
 
@@ -1967,7 +1978,6 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi);
-
        return gi->gl;
 }
 
@@ -1978,7 +1988,6 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
 
        gi->gl = NULL;
        rhashtable_walk_stop(&gi->hti);
-       rhashtable_walk_exit(&gi->hti);
 }
 
 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -2041,10 +2050,16 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                struct gfs2_glock_iter *gi = seq->private;
 
                gi->sdp = inode->i_private;
+               /*
+                * Initially, we are "before" the first hash table entry; the
+                * first call to rhashtable_walk_next gets us the first entry.
+                */
+               gi->last_pos = -1;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                gi->gl = NULL;
+               rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
        return ret;
 }
@@ -2060,6 +2075,7 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
        struct gfs2_glock_iter *gi = seq->private;
 
        gi->gl = NULL;
+       rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
 }
 
-- 
2.14.3

Reply via email to