Commit-ID:  7c1deeeb01308426a27a70d5a506aa5fae66dc62
Gitweb:     https://git.kernel.org/tip/7c1deeeb01308426a27a70d5a506aa5fae66dc62
Author:     Vincenzo Frascino <vincenzo.frasc...@arm.com>
AuthorDate: Fri, 21 Jun 2019 10:52:39 +0100
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Sat, 22 Jun 2019 21:21:08 +0200

arm64: compat: VDSO setup for compat layer

If CONFIG_GENERIC_COMPAT_VDSO is enabled, the compat vDSO is installed in a
compat (32-bit) process instead of the sigpage.

Add the necessary code to set up the required vDSO pages.
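
Not part of the patch, just for illustration: a minimal AArch32 userspace
sketch (assuming glibc's getauxval() and that the rest of this series
publishes the compat vDSO via AT_SYSINFO_EHDR) which locates the mapping
installed here. Build it with a 32-bit ARM toolchain, e.g.
arm-linux-gnueabihf-gcc, and run it on a kernel with the compat vDSO
enabled:

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (!base) {
		puts("no vDSO advertised via AT_SYSINFO_EHDR");
		return 1;
	}

	/* The compat vDSO mapping starts with a 32-bit ELF header. */
	printf("vDSO mapped at %#lx, ident: %.3s\n",
	       base, (const char *)((Elf32_Ehdr *)base)->e_ident + 1);
	return 0;
}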

Signed-off-by: Vincenzo Frascino <vincenzo.frasc...@arm.com>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Tested-by: Shijith Thotton <sthot...@marvell.com>
Tested-by: Andre Przywara <andre.przyw...@arm.com>
Cc: linux-a...@vger.kernel.org
Cc: linux-arm-ker...@lists.infradead.org
Cc: linux-m...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Arnd Bergmann <a...@arndb.de>
Cc: Russell King <li...@armlinux.org.uk>
Cc: Ralf Baechle <r...@linux-mips.org>
Cc: Paul Burton <paul.bur...@mips.com>
Cc: Daniel Lezcano <daniel.lezc...@linaro.org>
Cc: Mark Salyzyn <saly...@android.com>
Cc: Peter Collingbourne <p...@google.com>
Cc: Shuah Khan <sh...@kernel.org>
Cc: Dmitry Safonov <0x7f454...@gmail.com>
Cc: Rasmus Villemoes <li...@rasmusvillemoes.dk>
Cc: Huw Davies <h...@codeweavers.com>
Link: https://lkml.kernel.org/r/20190621095252.32307-13-vincenzo.frasc...@arm.com

---
 arch/arm64/kernel/vdso.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 88 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index be23efc3f60d..354b11e27c07 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -29,12 +29,22 @@
 #include <asm/vdso.h>
 
 extern char vdso_start[], vdso_end[];
+#ifdef CONFIG_COMPAT_VDSO
+extern char vdso32_start[], vdso32_end[];
+#endif /* CONFIG_COMPAT_VDSO */
 
 /* vdso_lookup arch_index */
 enum arch_vdso_type {
        ARM64_VDSO = 0,
+#ifdef CONFIG_COMPAT_VDSO
+       ARM64_VDSO32 = 1,
+#endif /* CONFIG_COMPAT_VDSO */
 };
+#ifdef CONFIG_COMPAT_VDSO
+#define VDSO_TYPES             (ARM64_VDSO32 + 1)
+#else
 #define VDSO_TYPES             (ARM64_VDSO + 1)
+#endif /* CONFIG_COMPAT_VDSO */
 
 struct __vdso_abi {
        const char *name;
@@ -53,6 +63,13 @@ static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
                .vdso_code_start = vdso_start,
                .vdso_code_end = vdso_end,
        },
+#ifdef CONFIG_COMPAT_VDSO
+       {
+               .name = "vdso32",
+               .vdso_code_start = vdso32_start,
+               .vdso_code_end = vdso32_end,
+       },
+#endif /* CONFIG_COMPAT_VDSO */
 };
 
 /*
@@ -163,24 +180,52 @@ up_fail:
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
+#ifdef CONFIG_COMPAT_VDSO
+static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
+               struct vm_area_struct *new_vma)
+{
+       return __vdso_remap(ARM64_VDSO32, sm, new_vma);
+}
+#endif /* CONFIG_COMPAT_VDSO */
+
 /*
  * aarch32_vdso_pages:
  * 0 - kuser helpers
  * 1 - sigreturn code
+ * or (CONFIG_COMPAT_VDSO):
+ * 0 - kuser helpers
+ * 1 - vdso data
+ * 2 - vdso code
  */
 #define C_VECTORS      0
+#ifdef CONFIG_COMPAT_VDSO
+#define C_VVAR         1
+#define C_VDSO         2
+#define C_PAGES                (C_VDSO + 1)
+#else
 #define C_SIGPAGE      1
 #define C_PAGES                (C_SIGPAGE + 1)
+#endif /* CONFIG_COMPAT_VDSO */
 static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
-static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
+static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
        {
                .name   = "[vectors]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_VECTORS],
        },
+#ifdef CONFIG_COMPAT_VDSO
+       {
+               .name = "[vvar]",
+       },
+       {
+               .name = "[vdso]",
+               .mremap = aarch32_vdso_mremap,
+       },
+#else
        {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_SIGPAGE],
        },
+#endif /* CONFIG_COMPAT_VDSO */
 };
 
 static int aarch32_alloc_kuser_vdso_page(void)
@@ -203,7 +248,33 @@ static int aarch32_alloc_kuser_vdso_page(void)
        return 0;
 }
 
-static int __init aarch32_alloc_vdso_pages(void)
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+       int ret;
+
+       vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
+       vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
+
+       ret = __vdso_init(ARM64_VDSO32);
+       if (ret)
+               return ret;
+
+       ret = aarch32_alloc_kuser_vdso_page();
+       if (ret) {
+               unsigned long c_vvar =
+                       (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
+               unsigned long c_vdso =
+                       (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
+
+               free_page(c_vvar);
+               free_page(c_vdso);
+       }
+
+       return ret;
+}
+#else
+static int __aarch32_alloc_vdso_pages(void)
 {
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
@@ -224,6 +295,12 @@ static int __init aarch32_alloc_vdso_pages(void)
 
        return ret;
 }
+#endif /* CONFIG_COMPAT_VDSO */
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+       return __aarch32_alloc_vdso_pages();
+}
 arch_initcall(aarch32_alloc_vdso_pages);
 
 static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
@@ -245,6 +322,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
        return PTR_ERR_OR_ZERO(ret);
 }
 
+#ifndef CONFIG_COMPAT_VDSO
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
        unsigned long addr;
@@ -272,6 +350,7 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
 out:
        return PTR_ERR_OR_ZERO(ret);
 }
+#endif /* !CONFIG_COMPAT_VDSO */
 
 int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
@@ -285,7 +364,14 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        if (ret)
                goto out;
 
+#ifdef CONFIG_COMPAT_VDSO
+       ret = __setup_additional_pages(ARM64_VDSO32,
+                                      mm,
+                                      bprm,
+                                      uses_interp);
+#else
        ret = aarch32_sigreturn_setup(mm);
+#endif /* CONFIG_COMPAT_VDSO */
 
 out:
        up_write(&mm->mmap_sem);

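For context only (not part of the patch): the resulting layout of a compat
task can be inspected from userspace; the mapping names come from
aarch32_vdso_spec above. A minimal sketch, assuming it is built and run as
a 32-bit binary:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen");
		return 1;
	}

	/*
	 * With CONFIG_COMPAT_VDSO=y expect [vectors], [vvar] and [vdso];
	 * without it, [vectors] and [sigpage].
	 */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vectors]") || strstr(line, "[sigpage]") ||
		    strstr(line, "[vvar]") || strstr(line, "[vdso]"))
			fputs(line, stdout);
	}

	fclose(maps);
	return 0;
}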