The __wait_var_event facility calculates a wait queue from a hash of the
address of the variable being passed. Use the @page argument directly as
it is shorter to type and is the object actually being waited upon. The
matching wake_up_var() caller in __put_devmap_managed_page_refs() must key
on the same address, so convert it to @page as well.
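
For context, a minimal sketch of how the wait_var_event()/wake_up_var()
pair is keyed (a hypothetical "busy" flag, not part of this patch): both
sides must pass the same address, since the facility hashes that address
to pick the shared wait queue. This is why the wake_up_var() call in
memremap.c is switched to @page to match the waiters above.

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static atomic_t busy = ATOMIC_INIT(1);

static void waiter(void)
{
        /* Sleep until the condition holds; the wait queue is keyed on &busy. */
        wait_var_event(&busy, atomic_read(&busy) == 0);
}

static void waker(void)
{
        atomic_set(&busy, 0);
        /* Must pass the same address the waiter used, or the wakeup is missed. */
        wake_up_var(&busy);
}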

Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Jan Kara <j...@suse.cz>
Cc: "Darrick J. Wong" <djw...@kernel.org>
Cc: Jason Gunthorpe <j...@nvidia.com>
Cc: Christoph Hellwig <h...@lst.de>
Cc: John Hubbard <jhubb...@nvidia.com>
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---
 fs/ext4/inode.c   |    8 ++++----
 fs/fuse/dax.c     |    6 +++---
 fs/xfs/xfs_file.c |    6 +++---
 mm/memremap.c     |    2 +-
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 601214453c3a..b028a4413bea 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3961,10 +3961,10 @@ int ext4_break_layouts(struct inode *inode)
                if (!page)
                        return 0;
 
-               error = ___wait_var_event(&page->_refcount,
-                               atomic_read(&page->_refcount) == 1,
-                               TASK_INTERRUPTIBLE, 0, 0,
-                               ext4_wait_dax_page(inode));
+               error = ___wait_var_event(page,
+                                         atomic_read(&page->_refcount) == 1,
+                                         TASK_INTERRUPTIBLE, 0, 0,
+                                         ext4_wait_dax_page(inode));
        } while (error == 0);
 
        return error;
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index e23e802a8013..4e12108c68af 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -676,9 +676,9 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
                return 0;
 
        *retry = true;
-       return ___wait_var_event(&page->_refcount,
-                       atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-                       0, 0, fuse_wait_dax_page(inode));
+       return ___wait_var_event(page, atomic_read(&page->_refcount) == 1,
+                                TASK_INTERRUPTIBLE, 0, 0,
+                                fuse_wait_dax_page(inode));
 }
 
 /* dmap_end == 0 leads to unmapping of whole file */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c6c80265c0b2..73e7b7ec0a4c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -827,9 +827,9 @@ xfs_break_dax_layouts(
                return 0;
 
        *retry = true;
-       return ___wait_var_event(&page->_refcount,
-                       atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-                       0, 0, xfs_wait_dax_page(inode));
+       return ___wait_var_event(page, atomic_read(&page->_refcount) == 1,
+                                TASK_INTERRUPTIBLE, 0, 0,
+                                xfs_wait_dax_page(inode));
 }
 
 int
diff --git a/mm/memremap.c b/mm/memremap.c
index 58b20c3c300b..95f6ffe9cb0f 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -520,7 +520,7 @@ bool __put_devmap_managed_page_refs(struct page *page, int refs)
         * stable because nobody holds a reference on the page.
         */
        if (page_ref_sub_return(page, refs) == 1)
-               wake_up_var(&page->_refcount);
+               wake_up_var(page);
        return true;
 }
 EXPORT_SYMBOL(__put_devmap_managed_page_refs);
