An upcoming change will allocate the datapages dynamically instead of as
part of the kernel image. Such pages can only be mapped through
'struct page' and not through PFNs.

Prepare for the dynamic allocation by mapping through 'struct page'.

VM_MIXEDMAP is necessary for the call to vmf_insert_page() in the timens
prefault path to work.

Signed-off-by: Thomas Weißschuh <thomas.weisssc...@linutronix.de>
---
 lib/vdso/datastore.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
index 7377fcb6e1dfe31d02ffcae371fdf9da069ae4c1..6e5feb4a95b85f5a1cbdced7cdeddc593fcbad40 100644
--- a/lib/vdso/datastore.c
+++ b/lib/vdso/datastore.c
@@ -39,14 +39,15 @@ struct vdso_arch_data *vdso_k_arch_data = &vdso_arch_data_store.data;
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct page *timens_page = find_timens_vvar_page(vma);
-       unsigned long pfn;
+       struct page *page, *timens_page;
+
+       timens_page = find_timens_vvar_page(vma);
 
        switch (vmf->pgoff) {
        case VDSO_TIME_PAGE_OFFSET:
                if (!IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY))
                        return VM_FAULT_SIGBUS;
-               pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
+               page = virt_to_page(vdso_k_time_data);
                if (timens_page) {
                        /*
                         * Fault in VVAR page too, since it will be accessed
@@ -56,10 +57,10 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                        vm_fault_t err;
 
                        addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
-                       err = vmf_insert_pfn(vma, addr, pfn);
+                       err = vmf_insert_page(vma, addr, page);
                        if (unlikely(err & VM_FAULT_ERROR))
                                return err;
-                       pfn = page_to_pfn(timens_page);
+                       page = timens_page;
                }
                break;
        case VDSO_TIMENS_PAGE_OFFSET:
@@ -72,24 +73,25 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                 */
                if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page)
                        return VM_FAULT_SIGBUS;
-               pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
+               page = virt_to_page(vdso_k_time_data);
                break;
        case VDSO_RNG_PAGE_OFFSET:
                if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM))
                        return VM_FAULT_SIGBUS;
-               pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data));
+               page = virt_to_page(vdso_k_rng_data);
                break;
        case VDSO_ARCH_PAGES_START ... VDSO_ARCH_PAGES_END:
                if (!IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA))
                        return VM_FAULT_SIGBUS;
-               pfn = __phys_to_pfn(__pa_symbol(vdso_k_arch_data)) +
-                       vmf->pgoff - VDSO_ARCH_PAGES_START;
+               page = virt_to_page(vdso_k_arch_data) + vmf->pgoff - VDSO_ARCH_PAGES_START;
                break;
        default:
                return VM_FAULT_SIGBUS;
        }
 
-       return vmf_insert_pfn(vma, vmf->address, pfn);
+       get_page(page);
+       vmf->page = page;
+       return 0;
 }
 
 const struct vm_special_mapping vdso_vvar_mapping = {
@@ -101,7 +103,7 @@ struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned
 {
        return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
                                        VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP |
-                                       VM_PFNMAP | VM_SEALED_SYSMAP,
+                                       VM_MIXEDMAP | VM_SEALED_SYSMAP,
                                        &vdso_vvar_mapping);
 }
 

-- 
2.51.0


Reply via email to