Pass @vmf to dax_associate_entry() so the separate @vma and @address
arguments can be dropped, use the existing DAX flags to convey the @cow
argument, and replace the open-coded ALIGN_DOWN().
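
For reference, a minimal userspace sketch (not part of this patch) of why
the open-coded mask corresponds to ALIGN_DOWN() rather than ALIGN(); the
macro definitions below are simplified stand-ins for the kernel versions
and assume a power-of-two @size:

    #include <stdio.h>

    /* simplified stand-ins for the kernel macros, power-of-two @a only */
    #define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
    #define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* page-aligned fault address inside a 2M (PMD-sized) entry */
            unsigned long address = 0x200000UL + 0x3000UL;
            unsigned long size = 0x200000UL;

            printf("open-coded: %#lx\n", address & ~(size - 1));     /* 0x200000 */
            printf("ALIGN_DOWN: %#lx\n", ALIGN_DOWN(address, size)); /* 0x200000 */
            printf("ALIGN:      %#lx\n", ALIGN(address, size));      /* 0x400000 */
            return 0;
    }

ALIGN() rounds up to the next @size boundary, which for a PMD fault whose
address is not already PMD-aligned would compute the index of the following
entry, hence ALIGN_DOWN() below.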

Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Jan Kara <j...@suse.cz>
Cc: "Darrick J. Wong" <djw...@kernel.org>
Cc: Jason Gunthorpe <j...@nvidia.com>
Cc: Christoph Hellwig <h...@lst.de>
Cc: John Hubbard <jhubb...@nvidia.com>
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---
 fs/dax.c |    9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index d2fb58a7449b..fad1c8a1d913 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -362,7 +362,7 @@ static inline void dax_mapping_set_cow(struct page *page)
  * FS_DAX_MAPPING_COW, and use page->index as refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-               struct vm_area_struct *vma, unsigned long address, bool cow)
+                               struct vm_fault *vmf, unsigned long flags)
 {
        unsigned long size = dax_entry_size(entry), pfn, index;
        int i = 0;
@@ -370,11 +370,11 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;
 
-       index = linear_page_index(vma, address & ~(size - 1));
+       index = linear_page_index(vmf->vma, ALIGN_DOWN(vmf->address, size));
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
-               if (cow) {
+               if (flags & DAX_COW) {
                        dax_mapping_set_cow(page);
                } else {
                        WARN_ON_ONCE(page->mapping);
@@ -882,8 +882,7 @@ static vm_fault_t dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
                void *old;
 
                dax_disassociate_entry(entry, mapping, false);
-               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
-                               cow);
+               dax_associate_entry(new_entry, mapping, vmf, flags);
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or

