4.4-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Matt Fleming <[email protected]>

commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream.

The x86 pageattr code is confused about the data stored in
cpa->pfn: sometimes it is treated as a page frame number,
sometimes as an unshifted physical address, and in one place
as a pte.

The result of this is that the mapping functions do not map the
intended physical address.
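
For illustration only (not part of this patch): a minimal standalone C
sketch of the unit mix-up, using a hypothetical physical address and
assuming 4 KiB pages (PAGE_SHIFT == 12). A pfn is the physical address
shifted right by PAGE_SHIFT, so advancing by one page means pfn + 1;
adding PAGE_SIZE to a pfn, as the old code effectively did, skips 4096
pages instead of one:

        #include <stdio.h>

        #define PAGE_SHIFT      12
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)

        int main(void)
        {
                unsigned long phys = 0x1234000UL;               /* hypothetical physical address */
                unsigned long pfn  = phys >> PAGE_SHIFT;        /* its page frame number */

                /* Correct: step to the next page's pfn. */
                unsigned long next_ok  = pfn + 1;

                /*
                 * What treating the pfn as a byte address amounts to:
                 * bumping it by PAGE_SIZE skips 4096 pages, not one.
                 */
                unsigned long next_bad = pfn + PAGE_SIZE;

                printf("pfn=%#lx ok=%#lx bad=%#lx\n", pfn, next_ok, next_bad);
                return 0;
        }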

This isn't a problem in practice because most of the addresses
we're mapping in the EFI code paths are already mapped in
'trampoline_pgd' and so the pageattr mapping functions don't
actually do anything in this case. But when we move to using a
separate page table for the EFI runtime this will be an issue.

Signed-off-by: Matt Fleming <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Acked-by: Borislav Petkov <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Sai Praneeth Prakhya <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Toshi Kani <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Cc: "Ghannam, Yazen" <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 arch/x86/mm/pageattr.c         |   17 ++++++-----------
 arch/x86/platform/efi/efi_64.c |   16 ++++++++++------
 2 files changed, 16 insertions(+), 17 deletions(-)

--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -911,15 +911,10 @@ static void populate_pte(struct cpa_data
        pte = pte_offset_kernel(pmd, start);
 
        while (num_pages-- && start < end) {
-
-               /* deal with the NX bit */
-               if (!(pgprot_val(pgprot) & _PAGE_NX))
-                       cpa->pfn &= ~_PAGE_NX;
-
-               set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+               set_pte(pte, pfn_pte(cpa->pfn, pgprot));
 
                start    += PAGE_SIZE;
-               cpa->pfn += PAGE_SIZE;
+               cpa->pfn++;
                pte++;
        }
 }
@@ -975,11 +970,11 @@ static int populate_pmd(struct cpa_data
 
                pmd = pmd_offset(pud, start);
 
-               set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+               set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                   massage_pgprot(pmd_pgprot)));
 
                start     += PMD_SIZE;
-               cpa->pfn  += PMD_SIZE;
+               cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
                cur_pages += PMD_SIZE >> PAGE_SHIFT;
        }
 
@@ -1048,11 +1043,11 @@ static int populate_pud(struct cpa_data
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (end - start >= PUD_SIZE) {
-               set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+               set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                   massage_pgprot(pud_pgprot)));
 
                start     += PUD_SIZE;
-               cpa->pfn  += PUD_SIZE;
+               cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
                cur_pages += PUD_SIZE >> PAGE_SHIFT;
                pud++;
        }
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-       unsigned long text;
+       unsigned long pfn, text;
        struct page *page;
        unsigned npages;
        pgd_t *pgd;
@@ -160,7 +160,8 @@ int __init efi_setup_page_tables(unsigne
         * and ident-map those pages containing the map before calling
         * phys_efi_set_virtual_address_map().
         */
-       if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+       pfn = pa_memmap >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }
@@ -185,8 +186,9 @@ int __init efi_setup_page_tables(unsigne
 
        npages = (_end - _text) >> PAGE_SHIFT;
        text = __pa(_text);
+       pfn = text >> PAGE_SHIFT;
 
-       if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+       if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }
@@ -204,12 +206,14 @@ void __init efi_cleanup_page_tables(unsi
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
        pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-       unsigned long pf = 0;
+       unsigned long flags = 0;
+       unsigned long pfn;
 
        if (!(md->attribute & EFI_MEMORY_WB))
-               pf |= _PAGE_PCD;
+               flags |= _PAGE_PCD;
 
-       if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+       pfn = md->phys_addr >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, va);
 }

