Move the vdso datapage in front of the VDSO area,
before the vdso text.

This will allow to remove the __kernel_datapage_offset symbol
and simplify __get_datapage() in following patches.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/mmu_context.h |  2 +-
 arch/powerpc/kernel/vdso.c             | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index d54358cb5be1..e5a5e3cb7724 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -262,7 +262,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
                              unsigned long start, unsigned long end)
 {
-       unsigned long vdso_base = (unsigned long)mm->context.vdso;
+       unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
 
        if (start <= vdso_base && vdso_base < end)
                mm->context.vdso = NULL;
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 87b77b793029..7042e9edfb96 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -123,7 +123,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
        if (new_size != text_size + PAGE_SIZE)
                return -EINVAL;
 
-       current->mm->context.vdso = (void __user *)new_vma->vm_start;
+       current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
 
        return 0;
 }
@@ -198,7 +198,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
         * install_special_mapping or the perf counter mmap tracking code
         * will fail to recognise it as a vDSO.
         */
-       mm->context.vdso = (void __user *)vdso_base;
+       mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
 
        /*
         * our vma flags don't have VM_WRITE so by default, the process isn't
@@ -510,7 +510,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
                return -1;
        }
        *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
-               (vdso64_pages << PAGE_SHIFT) -
+               -PAGE_SIZE -
                (sym64->st_value - VDSO64_LBASE);
 #endif /* CONFIG_PPC64 */
 
@@ -522,7 +522,7 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
                return -1;
        }
        *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
-               (vdso32_pages << PAGE_SHIFT) -
+               -PAGE_SIZE -
                (sym32->st_value - VDSO32_LBASE);
 #endif
 
@@ -696,10 +696,10 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
        if (!pagelist)
                panic("%s: Cannot allocate page list for VDSO", __func__);
 
-       for (i = 0; i < pages; i++)
-               pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
+       pagelist[0] = virt_to_page(vdso_data);
 
-       pagelist[i] = virt_to_page(vdso_data);
+       for (i = 0; i < pages; i++)
+               pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
 
        return pagelist;
 }
-- 
2.25.0

Reply via email to