To prepare for changes to arch_calc_vm_prot_bits() in the next patch, and
to be consistent with other architectures, move arch_vm_get_page_prot()
and arch_calc_vm_prot_bits() from arch/x86/include/uapi/asm/mman.h to the
new arch/x86/include/asm/mman.h. These macros depend on kernel-internal
symbols and do not belong in the UAPI header.

Signed-off-by: Yu-cheng Yu <yu-cheng...@intel.com>
Reviewed-by: Kees Cook <keesc...@chromium.org>
---
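(Note for reviewers, not part of the patch: below is a minimal userspace
sketch of the bit mapping arch_calc_vm_prot_bits() performs. The
VM_PKEY_BIT* values are mocked for illustration only; the kernel's real
definitions live in include/linux/mm.h.)

#include <stdio.h>

/* Mocked vm_flags bit values, for illustration only. */
#define VM_PKEY_BIT0   0x1UL
#define VM_PKEY_BIT1   0x2UL
#define VM_PKEY_BIT2   0x4UL
#define VM_PKEY_BIT3   0x8UL

/* Same shape as arch_calc_vm_prot_bits(): spread the 4-bit protection
 * key number across the individual VM_PKEY_BIT* vm_flags bits. */
#define calc_vm_pkey_bits(key) (                       \
               ((key) & 0x1 ? VM_PKEY_BIT0 : 0) |      \
               ((key) & 0x2 ? VM_PKEY_BIT1 : 0) |      \
               ((key) & 0x4 ? VM_PKEY_BIT2 : 0) |      \
               ((key) & 0x8 ? VM_PKEY_BIT3 : 0))

int main(void)
{
       /* pkey 5 == 0b0101, so bits 0 and 2 are selected. */
       printf("pkey 5 -> vm_flags bits 0x%lx\n", calc_vm_pkey_bits(5));
       return 0;
}

Compiled and run, this prints "pkey 5 -> vm_flags bits 0x5", i.e.
VM_PKEY_BIT0 | VM_PKEY_BIT2 with the mocked values above.
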
 arch/x86/include/asm/mman.h      | 30 ++++++++++++++++++++++++++++++
 arch/x86/include/uapi/asm/mman.h | 27 +++------------------------
 2 files changed, 33 insertions(+), 24 deletions(-)
 create mode 100644 arch/x86/include/asm/mman.h

diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
new file mode 100644
index 000000000000..629f6c81263a
--- /dev/null
+++ b/arch/x86/include/asm/mman.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_MMAN_H
+#define _ASM_X86_MMAN_H
+
+#include <linux/mm.h>
+#include <uapi/asm/mman.h>
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+/*
+ * Take the 4 protection key bits out of the vma->vm_flags
+ * value and turn them in to the bits that we can put in
+ * to a pte.
+ *
+ * Only override these if Protection Keys are available
+ * (which is only on 64-bit).
+ */
+#define arch_vm_get_page_prot(vm_flags)        __pgprot(       \
+               ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) |     \
+               ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) |     \
+               ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) |     \
+               ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
+
+#define arch_calc_vm_prot_bits(prot, key) (            \
+               ((key) & 0x1 ? VM_PKEY_BIT0 : 0) |      \
+               ((key) & 0x2 ? VM_PKEY_BIT1 : 0) |      \
+               ((key) & 0x4 ? VM_PKEY_BIT2 : 0) |      \
+               ((key) & 0x8 ? VM_PKEY_BIT3 : 0))
+#endif
+
+#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
index d4a8d0424bfb..3ce1923e6ed9 100644
--- a/arch/x86/include/uapi/asm/mman.h
+++ b/arch/x86/include/uapi/asm/mman.h
@@ -1,31 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_X86_MMAN_H
-#define _ASM_X86_MMAN_H
+#ifndef _UAPI_ASM_X86_MMAN_H
+#define _UAPI_ASM_X86_MMAN_H
 
 #define MAP_32BIT      0x40            /* only give out 32bit addresses */
 
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-/*
- * Take the 4 protection key bits out of the vma->vm_flags
- * value and turn them in to the bits that we can put in
- * to a pte.
- *
- * Only override these if Protection Keys are available
- * (which is only on 64-bit).
- */
-#define arch_vm_get_page_prot(vm_flags)        __pgprot(       \
-               ((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) |     \
-               ((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) |     \
-               ((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) |     \
-               ((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
-
-#define arch_calc_vm_prot_bits(prot, key) (            \
-               ((key) & 0x1 ? VM_PKEY_BIT0 : 0) |      \
-               ((key) & 0x2 ? VM_PKEY_BIT1 : 0) |      \
-               ((key) & 0x4 ? VM_PKEY_BIT2 : 0) |      \
-               ((key) & 0x8 ? VM_PKEY_BIT3 : 0))
-#endif
 
 #include <asm-generic/mman.h>
 
-#endif /* _ASM_X86_MMAN_H */
+#endif /* _UAPI_ASM_X86_MMAN_H */
-- 
2.21.0
