Add a new flag for struct dev_pagemap which designates that a pagemap
is described as a set of compound pages, or in other words, that the
way pages are grouped together in the page tables is reflected in how
we describe struct pages. This means that rather than initializing
individual struct pages, we initialize them as compound pages (on x86:
2M or 1G compound pages).

For certain ZONE_DEVICE users, like device-dax, which have a fixed page
size, this creates an opportunity to optimize GUP and GUP-fast walkers,
thus playing the same tricks as hugetlb pages.
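
As a rough illustration (not part of this patch), a ZONE_DEVICE user
would opt in along these lines. The caller, device and range setup
below are hypothetical; PGMAP_COMPOUND and @align are the fields
introduced here, while devm_memremap_pages() and MEMORY_DEVICE_GENERIC
are the existing kernel APIs:

    static void *example_map_compound(struct device *dev,
                                      struct dev_pagemap *pgmap,
                                      struct range *range)
    {
            pgmap->type = MEMORY_DEVICE_GENERIC;
            pgmap->range = *range;  /* device physical address range */
            pgmap->nr_range = 1;

            /* Describe the pagemap as 2M compound pages (x86 PMD size). */
            pgmap->flags |= PGMAP_COMPOUND;
            pgmap->align = PMD_SIZE;

            /*
             * memmap_init_zone_device() will then call
             * prep_compound_page() once per 2M chunk, and
             * pagemap_range() takes one percpu ref per compound page
             * rather than one per base page.
             */
            return devm_memremap_pages(dev, pgmap);
    }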

Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
---
 include/linux/memremap.h | 2 ++
 mm/memremap.c            | 8 ++++++--
 mm/page_alloc.c          | 7 +++++++
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 79c49e7f5c30..f8f26b2cc3da 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -90,6 +90,7 @@ struct dev_pagemap_ops {
 };
 
 #define PGMAP_ALTMAP_VALID     (1 << 0)
+#define PGMAP_COMPOUND         (1 << 1)
 
 /**
  * struct dev_pagemap - metadata for ZONE_DEVICE mappings
@@ -114,6 +115,7 @@ struct dev_pagemap {
        struct completion done;
        enum memory_type type;
        unsigned int flags;
+       unsigned int align;
        const struct dev_pagemap_ops *ops;
        void *owner;
        int nr_range;
diff --git a/mm/memremap.c b/mm/memremap.c
index 16b2fb482da1..287a24b7a65a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -277,8 +277,12 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), pgmap);
-       percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
-                       - pfn_first(pgmap, range_id));
+       if (pgmap->flags & PGMAP_COMPOUND)
+               percpu_ref_get_many(pgmap->ref, (pfn_end(pgmap, range_id)
+                       - pfn_first(pgmap, range_id)) / PHYS_PFN(pgmap->align));
+       else
+               percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
+                               - pfn_first(pgmap, range_id));
        return 0;
 
 err_add_memory:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa227a479e4..9716ecd58e29 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6116,6 +6116,8 @@ void __ref memmap_init_zone_device(struct zone *zone,
        unsigned long pfn, end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct vmem_altmap *altmap = pgmap_altmap(pgmap);
+       bool compound = pgmap->flags & PGMAP_COMPOUND;
+       unsigned int align = PHYS_PFN(pgmap->align);
        unsigned long zone_idx = zone_idx(zone);
        unsigned long start = jiffies;
        int nid = pgdat->node_id;
@@ -6171,6 +6173,11 @@ void __ref memmap_init_zone_device(struct zone *zone,
                }
        }
 
+       if (compound) {
+               for (pfn = start_pfn; pfn < end_pfn; pfn += align)
+                       prep_compound_page(pfn_to_page(pfn), order_base_2(align));
+       }
+
        pr_info("%s initialised %lu pages in %ums\n", __func__,
                nr_pages, jiffies_to_msecs(jiffies - start));
 }
-- 
2.17.1
_______________________________________________
Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org
To unsubscribe send an email to linux-nvdimm-le...@lists.01.org

Reply via email to