Hi Catalin,

On 2019/9/24 18:33, Catalin Marinas wrote:
On Tue, Sep 24, 2019 at 06:43:06AM +0000, Justin He (Arm Technology China) wrote:
Catalin Marinas wrote:
On Sat, Sep 21, 2019 at 09:50:54PM +0800, Jia He wrote:
@@ -2151,21 +2163,53 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
         * fails, we just zero-fill it. Live with it.
         */
        if (unlikely(!src)) {
-               void *kaddr = kmap_atomic(dst);
-               void __user *uaddr = (void __user *)(va & PAGE_MASK);
+               void *kaddr;
+               pte_t entry;
+               void __user *uaddr = (void __user *)(addr & PAGE_MASK);

+               /* On architectures with software "accessed" bits, we would
+                * take a double page fault, so mark it accessed here.
+                */
[...]
+               if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+                       vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr,
+                                                      &vmf->ptl);
+                       if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+                               entry = pte_mkyoung(vmf->orig_pte);
+                               if (ptep_set_access_flags(vma, addr,
+                                                         vmf->pte, entry, 0))
+                                       update_mmu_cache(vma, addr, vmf->pte);
+                       } else {
+                               /* Other thread has already handled the fault
+                                * and we don't need to do anything. If it's
+                                * not the case, the fault will be triggered
+                                * again on the same address.
+                                */
+                               pte_unmap_unlock(vmf->pte, vmf->ptl);
+                               return false;
+                       }
+                       pte_unmap_unlock(vmf->pte, vmf->ptl);
+               }
[...]
+
+               kaddr = kmap_atomic(dst);
Since you moved the kmap_atomic() here, could the above
arch_faults_on_old_pte() run in a preemptible context? I suggested
adding a WARN_ON in patch 2 to be sure.
Should I move kmap_atomic() back to its original line? That way we can make
sure that arch_faults_on_old_pte() runs with preemption disabled.
Otherwise, arch_faults_on_old_pte() may produce plenty of warnings if I add
a WARN_ON in it. I saw this when I tested with PREEMPT=y on a ThunderX2
qemu guest.
So we have two options here:

1. Change arch_faults_on_old_pte() scope to the whole system rather than
    just the current CPU. You'd have to wire up a new arm64 capability
    for the access flag but this way we don't care whether it's
    preemptible or not.

2. Keep the arch_faults_on_old_pte() per-CPU but make sure we are not
    preempted here. The kmap_atomic() move would do but you'd have to
    kunmap_atomic() before the return.

I think the answer to my question below also has some implication on
which option to pick:

                /*
                 * This really shouldn't fail, because the page is there
                 * in the page tables. But it might just be unreadable,
                 * in which case we just give up and fill the result with
                 * zeroes.
                 */
-               if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
+               if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+                       /* Give a warn in case there can be some obscure
+                        * use-case
+                        */
+                       WARN_ON_ONCE(1);
That's more of a question for the mm guys: at this point we do the
copying with the ptl released; is there anything else that could have
made the pte old in the meantime? I think unuse_pte() is only called on
anonymous vmas, so it shouldn't be the case here.
If we need to hold the ptl here, you could as well have an enclosing
kmap/kunmap_atomic (option 2) with some goto instead of "return false".

I am not 100% sure that I understand your suggestion correctly, so I have
drafted the patch here:

Changes:
 - cleaned up the indentation
 - hold the ptl longer
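
For context, the draft below assumes the helper from patch 2 ends up
looking roughly like this sketch, with the WARN_ON you suggested
(cpu_has_hw_af() is the arm64 hardware Access Flag check from patch 2;
its exact final shape here is my assumption):

/*
 * Per-CPU check for hardware Access Flag support. The result is only
 * meaningful while we cannot be preempted and migrated to another
 * CPU, hence the WARN_ON.
 */
static inline bool arch_faults_on_old_pte(void)
{
    WARN_ON(preemptible());

    return !cpu_has_hw_af();
}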


-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+static inline bool cow_user_page(struct page *dst, struct page *src,
+                 struct vm_fault *vmf)
 {
+    struct vm_area_struct *vma = vmf->vma;
+    struct mm_struct *mm = vma->vm_mm;
+    unsigned long addr = vmf->address;
+    bool ret;
+    pte_t entry;
+    void *kaddr;
+    void __user *uaddr;
+
     debug_dma_assert_idle(src);

+    if (likely(src)) {
+        copy_user_highpage(dst, src, addr, vma);
+        return true;
+    }
+
     /*
      * If the source page was a PFN mapping, we don't have
      * a "struct page" for it. We do a best-effort copy by
      * just copying from the original user address. If that
      * fails, we just zero-fill it. Live with it.
      */
-    if (unlikely(!src)) {
-        void *kaddr = kmap_atomic(dst);
-        void __user *uaddr = (void __user *)(va & PAGE_MASK);
+    kaddr = kmap_atomic(dst);
+    uaddr = (void __user *)(addr & PAGE_MASK);
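+
+    /*
+     * kmap_atomic() disables preemption, so the per-CPU
+     * arch_faults_on_old_pte() check below runs with preemption
+     * disabled, as it requires.
+     */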
+
+    /*
+     * Take the ptl before the copy so that the PTE cannot change
+     * under us. On architectures with software "accessed" bits we
+     * would otherwise take a double page fault, so mark the PTE
+     * accessed here while the lock is held.
+     */
+    vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+    if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+        if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
+            /*
+             * Other thread has already handled the fault
+             * and we don't need to do anything. If it's
+             * not the case, the fault will be triggered
+             * again on the same address.
+             */
+            ret = false;
+            goto pte_unlock;
+        }
+
+        entry = pte_mkyoung(vmf->orig_pte);
+        if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
+            update_mmu_cache(vma, addr, vmf->pte);
+    }

+    /*
+     * This really shouldn't fail, because the page is there
+     * in the page tables. But it might just be unreadable,
+     * in which case we just give up and fill the result with
+     * zeroes.
+     */
+    if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
         /*
-         * This really shouldn't fail, because the page is there
-         * in the page tables. But it might just be unreadable,
-         * in which case we just give up and fill the result with
-         * zeroes.
+         * Warn in case some obscure use-case can actually
+         * hit this path
          */
-        if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
-            clear_page(kaddr);
-        kunmap_atomic(kaddr);
-        flush_dcache_page(dst);
-    } else
-        copy_user_highpage(dst, src, va, vma);
+        WARN_ON_ONCE(1);
+        clear_page(kaddr);
+    }
+
+    ret = true;
+
+pte_unlock:
+    pte_unmap_unlock(vmf->pte, vmf->ptl);
+    kunmap_atomic(kaddr);
+    flush_dcache_page(dst);
+
+    return ret;
 }
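
On the caller side, wp_page_copy() would then consume the new return
value along these lines (sketch only; the exact cleanup on the failure
path is my assumption):

    if (!cow_user_page(new_page, old_page, vmf)) {
        /*
         * COW failed. If another thread has already handled the
         * fault we are fine; otherwise userspace will re-fault on
         * the same address and we handle it on the second attempt.
         */
        put_page(new_page);
        if (old_page)
            put_page(old_page);
        return 0;
    }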


---
Cheers,
Justin (Jia He)
