This adds an explicit check in various functions to ensure that the address range being mapped (linear mapping, vmemmap or ioremap) stays within the kernel virtual region reserved for it; a request that would cross the region boundary is now refused with a warning.
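
The checks all share one shape; below is a minimal, self-contained userspace
sketch of that pattern (illustrative only: LINEAR_MAP_LIMIT, its value and the
create_section_mapping() helper are made-up stand-ins, not the kernel's actual
symbols or layout):

  /* bounds_check_sketch.c - illustrative only, not kernel code */
  #include <stdio.h>

  /* Stand-in for a region boundary such as H_VMALLOC_START; value is invented. */
  #define LINEAR_MAP_LIMIT 0xc008000000000000UL

  /*
   * Same pattern as the checks in this patch: refuse to create a mapping
   * whose end crosses into the next kernel virtual region.
   */
  static int create_section_mapping(unsigned long start, unsigned long end)
  {
  	if (end >= LINEAR_MAP_LIMIT) {
  		fprintf(stderr, "Outside the supported range\n");
  		return -1;
  	}
  	/* the kernel functions go on to call htab_bolt_mapping() or
  	 * create_physical_mapping() at this point */
  	return 0;
  }

  int main(void)
  {
  	/* in range: accepted, prints 0 */
  	printf("%d\n", create_section_mapping(0x0UL, 0x100000000UL));
  	/* crosses the limit: rejected, prints -1 */
  	printf("%d\n", create_section_mapping(0x0UL, 0xc009000000000000UL));
  	return 0;
  }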

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/mm/hash_utils_64.c  | 18 +++++++++++++++---
 arch/powerpc/mm/pgtable-hash64.c | 13 ++++++++++---
 arch/powerpc/mm/pgtable-radix.c  | 16 ++++++++++++++++
 arch/powerpc/mm/pgtable_64.c     |  5 +++++
 4 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c6b39e7694ba..ef0ca3bf555d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -786,9 +786,16 @@ void resize_hpt_for_hotplug(unsigned long new_mem_size)
 
 int hash__create_section_mapping(unsigned long start, unsigned long end, int nid)
 {
-       int rc = htab_bolt_mapping(start, end, __pa(start),
-                                  pgprot_val(PAGE_KERNEL), mmu_linear_psize,
-                                  mmu_kernel_ssize);
+       int rc;
+
+       if (end >= H_VMALLOC_START) {
+               pr_warn("Outside the supported range\n");
+               return -1;
+       }
+
+       rc = htab_bolt_mapping(start, end, __pa(start),
+                              pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+                              mmu_kernel_ssize);
 
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
@@ -929,6 +936,11 @@ static void __init htab_initialize(void)
                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);
 
+               if ((base + size) >= H_VMALLOC_START) {
+                       pr_warn("Outside the supported range\n");
+                       continue;
+               }
+
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                prot, mmu_linear_psize, mmu_kernel_ssize));
        }
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index c08d49046a96..d934de4e2b3a 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -112,9 +112,16 @@ int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                       unsigned long page_size,
                                       unsigned long phys)
 {
-       int rc = htab_bolt_mapping(start, start + page_size, phys,
-                                  pgprot_val(PAGE_KERNEL),
-                                  mmu_vmemmap_psize, mmu_kernel_ssize);
+       int rc;
+
+       if ((start + page_size) >= H_VMEMMAP_END) {
+               pr_warn("Outside the supported range\n");
+               return -1;
+       }
+
+       rc = htab_bolt_mapping(start, start + page_size, phys,
+                              pgprot_val(PAGE_KERNEL),
+                              mmu_vmemmap_psize, mmu_kernel_ssize);
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index ba485fbd81f1..c9b24bf78819 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -334,6 +334,12 @@ void __init radix_init_pgtable(void)
                 * page tables will be allocated within the range. No
                 * need or a node (which we don't have yet).
                 */
+
+               if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
+                       pr_warn("Outside the supported range\n");
+                       continue;
+               }
+
                WARN_ON(create_physical_mapping(reg->base,
                                                reg->base + reg->size,
                                                -1));
@@ -866,6 +872,11 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 
 int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
 {
+       if (end >= RADIX_VMALLOC_START) {
+               pr_warn("Outside the supported range\n");
+               return -1;
+       }
+
        return create_physical_mapping(start, end, nid);
 }
 
@@ -893,6 +904,11 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
        int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
        int ret;
 
+       if ((start + page_size) >= RADIX_VMEMMAP_END) {
+               pr_warn("Outside the supported range\n");
+               return -1;
+       }
+
        ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
        BUG_ON(ret);
 
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 56068cac2a3c..72f58c076e26 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -121,6 +121,11 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
        if (pgprot_val(prot) & H_PAGE_4K_PFN)
                return NULL;
 
+       if ((ea + size) >= (void *)IOREMAP_END) {
+               pr_warn("Outside the supported range\n");
+               return NULL;
+       }
+
        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);
-- 
2.20.1
