This defines and exports a platform-specific custom vm_get_page_prot() by
selecting ARCH_HAS_VM_GET_PAGE_PROT. All the __SXXX and __PXXX macros can
then be dropped, as they are no longer needed.
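
For illustration only (the values below follow directly from the old macro
table and the new switch statement; they are not an additional change), the
helper returns the same protections the macros used to encode, e.g.

	vm_get_page_prot(VM_EXEC | VM_READ)              == PAGE_READONLY	/* was __P101 */
	vm_get_page_prot(VM_SHARED | VM_WRITE | VM_READ) == PAGE_SHARED 	/* was __S011 */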

Cc: Jeff Dike <jd...@addtoit.com>
Cc: linux...@lists.infradead.org
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khand...@arm.com>
---
 arch/um/Kconfig               |  1 +
 arch/um/include/asm/pgtable.h | 17 -----------------
 arch/um/kernel/mem.c          | 35 +++++++++++++++++++++++++++++++++++
 arch/x86/um/mem_32.c          |  2 +-
 4 files changed, 37 insertions(+), 18 deletions(-)

diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 4d398b80aea8..5836296868a8 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -9,6 +9,7 @@ config UML
        select ARCH_HAS_KCOV
        select ARCH_HAS_STRNCPY_FROM_USER
        select ARCH_HAS_STRNLEN_USER
+       select ARCH_HAS_VM_GET_PAGE_PROT
        select ARCH_NO_PREEMPT
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index b9e20bbe2f75..d982622c0708 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -68,23 +68,6 @@ extern unsigned long end_iomem;
  * Also, write permissions imply read permissions. This is the closest we can
  * get..
  */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 15295c3237a0..37c6c7b9dadc 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -197,3 +197,38 @@ void *uml_kmalloc(int size, int flags)
 {
        return kmalloc(size, flags);
 }
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+       switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+       case VM_NONE:
+               return PAGE_NONE;
+       case VM_READ:
+               return PAGE_READONLY;
+       case VM_WRITE:
+       case VM_WRITE | VM_READ:
+               return PAGE_COPY;
+       case VM_EXEC:
+       case VM_EXEC | VM_READ:
+               return PAGE_READONLY;
+       case VM_EXEC | VM_WRITE:
+       case VM_EXEC | VM_WRITE | VM_READ:
+               return PAGE_COPY;
+       case VM_SHARED:
+               return PAGE_NONE;
+       case VM_SHARED | VM_READ:
+               return PAGE_READONLY;
+       case VM_SHARED | VM_WRITE:
+       case VM_SHARED | VM_WRITE | VM_READ:
+               return PAGE_SHARED;
+       case VM_SHARED | VM_EXEC:
+       case VM_SHARED | VM_EXEC | VM_READ:
+               return PAGE_READONLY;
+       case VM_SHARED | VM_EXEC | VM_WRITE:
+       case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+               return PAGE_SHARED;
+       default:
+               BUILD_BUG();
+       }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 19c5dbd46770..cafd01f730da 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -17,7 +17,7 @@ static int __init gate_vma_init(void)
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-       gate_vma.vm_page_prot = __P101;
+       gate_vma.vm_page_prot = PAGE_READONLY;
 
        return 0;
 }
-- 
2.25.1
