steal_from_bitmap_to_front() finds the last zero bit at or below i by
walking the bitmap with for_each_clear_bit_from(), which always starts
scanning from bit 0 and has to track the previous clear bit in prev_j.

By adding find_last_zero_bit() and using it here, remove this overhead
and improve readability.

Signed-off-by: Levi Yun <ppbuk5...@gmail.com>
---
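Note to reviewers: the hunk below relies on find_last_zero_bit(), which is
added elsewhere in this series. Here is a minimal sketch of the semantics
assumed in this patch, mirroring find_last_bit(): scan bits [0, size) and
return @size when every bit in that range is set. The body is illustrative
only, not the proposed implementation:

	/* Sketch only; assumes <linux/bitops.h> for test_bit(). */
	static inline unsigned long
	find_last_zero_bit(const unsigned long *addr, unsigned long size)
	{
		unsigned long i = size;

		/* Walk down from the top bit of the range. */
		while (i--) {
			if (!test_bit(i, addr))
				return i;
		}

		/* All bits in [0, size) are set; return @size, like find_last_bit(). */
		return size;
	}

With those semantics, j == i after the call means no clear bit exists below
i, which is why the hunk below only needs one explicit test_bit() check on
bit i itself.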
 fs/btrfs/free-space-cache.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index af0013d3df63..6d393c834fdd 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2372,7 +2372,6 @@ static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
        u64 bitmap_offset;
        unsigned long i;
        unsigned long j;
-       unsigned long prev_j;
        u64 bytes;
 
        bitmap_offset = offset_to_bitmap(ctl, info->offset);
@@ -2388,20 +2387,15 @@ static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
                return false;
 
        i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
-       j = 0;
-       prev_j = (unsigned long)-1;
-       for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
-               if (j > i)
-                       break;
-               prev_j = j;
-       }
-       if (prev_j == i)
-               return false;
+       j = find_last_zero_bit(bitmap->bitmap, i);
 
-       if (prev_j == (unsigned long)-1)
-               bytes = (i + 1) * ctl->unit;
-       else
-               bytes = (i - prev_j) * ctl->unit;
+       /* The bit right before @info must be free (set) to steal from. */
+       if (!test_bit(i, bitmap->bitmap))
+               return false;
+       if (j == i)
+               bytes = (i + 1) * ctl->unit;
+       else
+               bytes = (i - j) * ctl->unit;
 
        info->offset -= bytes;
        info->bytes += bytes;
-- 
2.27.0
