alloc_gigantic_page() implements an allocation method that scans over
various zones looking for a contiguous memory block large enough that
it could not have been allocated through the buddy allocator. A
subsequent patch which tests arch page table helpers needs such a
method to allocate a PUD_SIZE sized memory block. Such a method might
have other use cases in the future as well. So split out the actual
allocation logic from alloc_gigantic_page() and make it available via
a new helper, alloc_gigantic_page_order().
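To illustrate the intended use, a caller needing a PUD_SIZE block
might do something like the minimal sketch below. The calling context
is hypothetical and not part of this patch; the gfp mask and node
selection shown here are only placeholder assumptions and would depend
on the actual caller:

	struct page *page;
	unsigned int order = get_order(PUD_SIZE);
	int nid = numa_mem_id();

	/*
	 * Request a physically contiguous block of 1 << order pages.
	 * Returns NULL when no zone can satisfy the request.
	 */
	page = alloc_gigantic_page_order(order, GFP_KERNEL, nid,
					 &node_states[N_MEMORY]);
	if (page) {
		/* ... use the contiguous block ... */
		free_contig_range(page_to_pfn(page), 1 << order);
	}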

Cc: Andrew Morton <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mark Brown <[email protected]>
Cc: Steven Price <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Masahiro Yamada <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Sri Krishna chowdary <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Russell King - ARM Linux <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: James Hogan <[email protected]>
Cc: Paul Burton <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
 include/linux/hugetlb.h |  9 +++++++++
 mm/hugetlb.c            | 24 ++++++++++++++++++++++--
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 4c5a16b..7ff1e36 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -298,6 +298,9 @@ static inline bool is_file_hugepages(struct file *file)
 }
 
 
+struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                         int nid, nodemask_t *nodemask);
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)                        false
@@ -309,6 +312,12 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
        return ERR_PTR(-ENOSYS);
 }
 
+static inline struct page *
+alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                         int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
 #endif /* !CONFIG_HUGETLBFS */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 977f9a3..2996e44 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1066,10 +1066,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
        return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
                int nid, nodemask_t *nodemask)
 {
-       unsigned int order = huge_page_order(h);
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zonelist *zonelist;
@@ -1105,6 +1104,14 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
        return NULL;
 }
 
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+                                       int nid, nodemask_t *nodemask)
+{
+       unsigned int order = huge_page_order(h);
+
+       return alloc_gigantic_page_order(order, gfp_mask, nid, nodemask);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 #else /* !CONFIG_CONTIG_ALLOC */
@@ -1113,6 +1120,12 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
        return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                                      int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
@@ -1121,6 +1134,13 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
        return NULL;
 }
+
+struct page *alloc_gigantic_page_order(unsigned int order, gfp_t gfp_mask,
+                                      int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
+
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
-- 
2.7.4
