In preparation for device-dax to use the same mapping machinery as
fsdax, add support for device-dax compound pages.

Presently this is handled by dax_set_mapping(), which is careful to only
update page->mapping for head pages. However, it does that by consulting
properties of the 'struct dev_dax' instance associated with the page.
Switch to just checking PageHead() directly in the functions that
iterate over the pages in a large mapping.

Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dax/Kconfig   |    1 +
 drivers/dax/mapping.c |   16 ++++++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 205e9dda8928..2eddd32c51f4 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -9,6 +9,7 @@ if DAX
 config DEV_DAX
        tristate "Device DAX: direct access mapping device"
        depends on TRANSPARENT_HUGEPAGE
+       depends on !FS_DAX_LIMITED
        help
          Support raw access to differentiated (persistence, bandwidth,
          latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/mapping.c b/drivers/dax/mapping.c
index 70576aa02148..5d4b9601f183 100644
--- a/drivers/dax/mapping.c
+++ b/drivers/dax/mapping.c
@@ -345,6 +345,8 @@ static vm_fault_t dax_associate_entry(void *entry,
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
+               page = compound_head(page);
+
                if (flags & DAX_COW) {
                        dax_mapping_set_cow(page);
                } else {
@@ -353,6 +355,9 @@ static vm_fault_t dax_associate_entry(void *entry,
                        page->index = index + i++;
                        page_ref_inc(page);
                }
+
+               if (PageHead(page))
+                       break;
        }
 
        return 0;
@@ -372,6 +377,9 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 
        for_each_mapped_pfn(entry, pfn) {
                page = pfn_to_page(pfn);
+
+               page = compound_head(page);
+
                if (dax_mapping_is_cow(page->mapping)) {
                        /* keep the CoW flag if this page is still shared */
                        if (page->index-- > 0)
@@ -383,6 +391,9 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                }
                page->mapping = NULL;
                page->index = 0;
+
+               if (PageHead(page))
+                       break;
        }
 
        if (trunc && !dax_mapping_is_cow(page->mapping)) {
@@ -660,11 +671,16 @@ static struct page *dax_zap_pages(struct xa_state *xas, void *entry)
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
+               page = compound_head(page);
+
                if (zap)
                        page_ref_dec(page);
 
                if (!ret && !dax_page_idle(page))
                        ret = page;
+
+               if (PageHead(page))
+                       break;
        }
 
        if (zap)


Reply via email to