In gfs2_glock_iter_next, use rhashtable_walk_curr to resume at the
current hash table entry instead of rescanning the hash table from the
start whenever an entry wraps across a seq_file buffer boundary.

With this change, allocating a larger seq_file buffer no longer
provides any benefit (if it ever did), so drop the GFS2_SEQ_GOODSIZE
buffer allocation as well.

Signed-off-by: Andreas Gruenbacher <[email protected]>
---
 fs/gfs2/glock.c | 56 ++++++++++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 24 deletions(-)
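
To illustrate the resume logic described above, here is a minimal
userspace sketch (not part of the patch): the names cursor, cur_next,
cur_curr, and seq_start are made up and only mirror the shape of
gfs2_glock_seq_start() and gfs2_glock_iter_next(), with cur_curr
standing in for rhashtable_walk_curr.

#include <stdio.h>
#include <stddef.h>

struct cursor {
	const int *tbl;		/* stand-in for the glock hash table */
	size_t len;
	long idx;		/* -1: parked before the first element */
	long long last_pos;	/* mirrors gi->last_pos */
};

/* Advance to and return the next element (rhashtable_walk_next). */
static const int *cur_next(struct cursor *c)
{
	if (c->idx + 1 >= (long)c->len)
		return NULL;
	return &c->tbl[++c->idx];
}

/* Return the element we are already parked on (rhashtable_walk_curr). */
static const int *cur_curr(struct cursor *c)
{
	if (c->idx < 0 || c->idx >= (long)c->len)
		return NULL;
	return &c->tbl[c->idx];
}

/* Mirrors gfs2_glock_seq_start(): stay put, skip ahead, or restart. */
static const int *seq_start(struct cursor *c, long long pos)
{
	const int *e = NULL;
	long long n;

	if (pos < c->last_pos) {	/* seeking backwards: restart */
		c->idx = -1;
		n = pos + 1;
	} else {			/* n == 0 means "stay here" */
		n = pos - c->last_pos;
	}
	c->last_pos = pos;

	if (n == 0)			/* resume on the current entry */
		return cur_curr(c);
	while (n--)
		e = cur_next(c);
	return e;
}

int main(void)
{
	static const int tbl[] = { 10, 20, 30, 40 };
	struct cursor c = { tbl, 4, -1, -1 };
	long long pos;

	/*
	 * A seq_file read fills its buffer up to pos 2; the next read
	 * restarts at the same pos 2 and now costs O(1) instead of a
	 * walk from the start of the table.
	 */
	for (pos = 0; pos <= 2; pos++)
		printf("pos %lld -> %d\n", pos, *seq_start(&c, pos));
	printf("resume pos 2 -> %d\n", *seq_start(&c, 2));
	return 0;
}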

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 93c7cd2325b4..ca4cb052f0c3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1924,20 +1924,36 @@ void gfs2_glock_exit(void)
        destroy_workqueue(gfs2_delete_workqueue);
 }
 
-static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
+static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
 {
-       while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
-               if (IS_ERR(gi->gl)) {
-                       if (PTR_ERR(gi->gl) == -EAGAIN)
-                               continue;
-                       gi->gl = NULL;
-                       return;
+       bool curr = false;
+
+       if (n == 0) {
+               curr = true;
+               n = 1;
+       }
+
+       do {
+               if (curr) {
+                       gi->gl = rhashtable_walk_curr(&gi->hti);
+                       curr = false;
+               } else
+                       gi->gl = rhashtable_walk_next(&gi->hti);
+               if (IS_ERR_OR_NULL(gi->gl)) {
+                       if (!gi->gl)
+                               return;
+                       if (PTR_ERR(gi->gl) != -EAGAIN) {
+                               gi->gl = NULL;
+                               return;
+                       }
+                       n = 2;  /* "continue" re-evaluates --n: n == 1 */
+                       continue;
                }
                /* Skip entries for other sb and dead entries */
-               if (gi->sdp == gi->gl->gl_name.ln_sbd &&
-                   !__lockref_is_dead(&gi->gl->gl_lockref))
-                       return;
-       }
+               if (gi->sdp != gi->gl->gl_name.ln_sbd ||
+                   __lockref_is_dead(&gi->gl->gl_lockref))
+                       n++;    /* a skipped entry must not consume --n */
+       } while (--n);
 }
 
 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1947,10 +1963,10 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        loff_t n;
 
        /*
-        * From the current position, we can either skip to the next hash table
-        * entry or start from the beginning.
+        * We can either stay where we are, skip to the next hash table
+        * entry, or start from the beginning.
         */
-       if (*pos <= gi->last_pos) {
+       if (*pos < gi->last_pos) {
                rhashtable_walk_exit(&gi->hti);
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
                n = *pos + 1;
@@ -1960,12 +1976,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        if (rhashtable_walk_start(&gi->hti) != 0)
                return NULL;
 
-       while (n--) {
-               gfs2_glock_iter_next(gi);
-               if (!gi->gl)
-                       break;
-       }
-
+       gfs2_glock_iter_next(gi, n);
        gi->last_pos = *pos;
        return gi->gl;
 }
@@ -1977,7 +1988,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
 
        (*pos)++;
        gi->last_pos = *pos;
-       gfs2_glock_iter_next(gi);
+       gfs2_glock_iter_next(gi, 1);
        return gi->gl;
 }
 
@@ -2055,9 +2066,6 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                 * first call to rhashtable_walk_next gets us the first entry.
                 */
                gi->last_pos = -1;
-       seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
-               if (seq->buf)
-                       seq->size = GFS2_SEQ_GOODSIZE;
                gi->gl = NULL;
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
-- 
2.14.3
