From: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>

commit c6f3c5ee40c10bb65725047a220570f718507001 upstream.

On some architectures, such as ppc64, set_pmd_at() cannot cope with a
situation where a (different) valid entry is already present.

Use pmdp_set_access_flags() to update the entry instead, since it is
built to deal with modifying existing PMD entries.
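
In sketch form, the fixed PMD path looks like this (a minimal
illustration of the pattern used in the diff below; the locking and
the pfn/huge-zero-page sanity check are omitted here):

	if (!pmd_none(*pmd)) {
		if (write) {
			/* same pfn: upgrade the existing entry in place */
			entry = maybe_pmd_mkwrite(pmd_mkdirty(pmd_mkyoung(*pmd)), vma);
			/* last argument: the entry is being made dirty/writable */
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}
		return;	/* never set_pmd_at() over a valid entry */
	}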

This is similar to commit cae85cb8add3 ("mm/memory.c: fix modifying of
page protection by insert_pfn()").

We also make a similar update for insert_pfn_pud(), even though ppc64
does not support PUD pfn entries yet.

Without this patch we also see the following message in the kernel log,
because each duplicate insert deposits (and accounts) another page
table that is not withdrawn on unmap: "BUG: non-zero pgtables_bytes on
freeing mm:"

Link: http://lkml.kernel.org/r/20190402115125.18803-1-aneesh.ku...@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
Reported-by: Chandan Rajendra <chan...@linux.ibm.com>
Reviewed-by: Jan Kara <j...@suse.cz>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: <sta...@vger.kernel.org>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>

---
 mm/huge_memory.c |   36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -734,6 +734,21 @@ static void insert_pfn_pmd(struct vm_are
        spinlock_t *ptl;
 
        ptl = pmd_lock(mm, pmd);
+       if (!pmd_none(*pmd)) {
+               if (write) {
+                       if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+                               goto out_unlock;
+                       }
+                       entry = pmd_mkyoung(*pmd);
+                       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+                       if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+                               update_mmu_cache_pmd(vma, addr, pmd);
+               }
+
+               goto out_unlock;
+       }
+
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
@@ -745,11 +760,16 @@ static void insert_pfn_pmd(struct vm_are
        if (pgtable) {
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                mm_inc_nr_ptes(mm);
+               pgtable = NULL;
        }
 
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
        spin_unlock(ptl);
+       if (pgtable)
+               pte_free(mm, pgtable);
 }
 
 vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -800,6 +820,20 @@ static void insert_pfn_pud(struct vm_are
        spinlock_t *ptl;
 
        ptl = pud_lock(mm, pud);
+       if (!pud_none(*pud)) {
+               if (write) {
+                       if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+                               goto out_unlock;
+                       }
+                       entry = pud_mkyoung(*pud);
+                       entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+                       if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+                               update_mmu_cache_pud(vma, addr, pud);
+               }
+               goto out_unlock;
+       }
+
        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
@@ -809,6 +843,8 @@ static void insert_pfn_pud(struct vm_are
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
        spin_unlock(ptl);
 }
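
For the PMD case, the early-return path must also dispose of the page
table the caller pre-allocated for deposit. Annotated, the ownership
rule the first hunk implements is (a commented paraphrase of the diff
above, not new logic):

	/*
	 * Fresh entry installed: hand the pre-allocated page table
	 * over to the deposit and account it.
	 */
	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;		/* ownership transferred */
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)			/* not consumed: give it back */
		pte_free(mm, pgtable);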
 

