On 07/15/2011 06:23 AM, Peter Zijlstra wrote:
On Fri, 2011-07-15 at 16:07 +0800, Shan Hai wrote:
The kernel has no write permission on COW pages by default on the e500 core; this
causes an endless loop in futex_lock_pi, because the futex code assumes the kernel
has write permission on COW pages. Grant write permission to the kernel on COW
pages when an access violation page fault occurs.
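(For reference, the loop in question is roughly the following; this is a
simplified paraphrase of the futex_lock_pi() retry path in kernel/futex.c
of this era, not the exact code.)

	retry:
		ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state,
					   current, 0);
		if (ret == -EFAULT) {
			/* The cmpxchg ran with pagefaults disabled and
			 * faulted; fault the page in writable and retry.
			 */
			if (fault_in_user_writeable(uaddr))
				goto out;	/* real error, give up */
			/* On e500 the kernel still has no write permission
			 * on the COW page afterwards, so the next cmpxchg
			 * faults again and this loops forever.
			 */
			goto retry;
		}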

Signed-off-by: Shan Hai <haishan....@gmail.com>
---
  arch/powerpc/include/asm/futex.h |   11 ++++++++++-
  arch/powerpc/include/asm/tlb.h   |   25 +++++++++++++++++++++++++
  2 files changed, 35 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3..54c3e74 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,6 +8,7 @@
  #include <asm/errno.h>
  #include <asm/synch.h>
  #include <asm/asm-compat.h>
+#include <asm/tlb.h>

  #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
    __asm__ __volatile ( \
@@ -113,7 +114,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
          : "cc", "memory");

        *uval = prev;
-        return ret;
+
+       /* Futex assumes the kernel has permission to write to
+        * COW pages, grant the kernel write permission on COW
+        * pages because it has none by default.
+        */
+       if (ret == -EFAULT)
+               __tlb_fixup_write_permission(current->mm, (unsigned long)uaddr);
+
+       return ret;
  }

  #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e2b428b..3863c6a 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -45,5 +45,30 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
  #endif
  }

+/* Grant write permission to the kernel on a page. */
+static inline void __tlb_fixup_write_permission(struct mm_struct *mm,
+                                               unsigned long address)
+{
+#if defined(CONFIG_FSL_BOOKE)
+       /* Grant the kernel write permission on a page by setting the TLB.SW
+        * bit.  The bit setting is indirect: calling handle_mm_fault() with
+        * FAULT_FLAG_WRITE causes the _PAGE_DIRTY bit of the pte to be set,
+        * and _PAGE_DIRTY is translated into TLB.SW on the PowerPC e500
+        * core.
+        */
+
+       struct vm_area_struct *vma;
+
+       vma = find_vma(mm, address);
Uhm, find_vma() needs mmap_sem, and futex_atomic_cmpxchg_inatomic() is
most certainly not called with that lock held.

+       if (likely(vma)) {
+               /* only fixup present page */
+               if (follow_page(vma, address, FOLL_WRITE)) {
+                       handle_mm_fault(mm, vma, address, FAULT_FLAG_WRITE);
So how can this toggle your sw dirty/young tracking, that's pretty much
what gup(.write=1) does too!


That's right, gup(.write=1) wants to do the same thing as the
code snippet above, but it fails for the following reason:
get_user_pages() does not dirty the pte, because follow_page()
returns non-NULL for a page that is *present* and *writable*.
The page that holds the lock is present because it is a shared page,
and writable because demand paging set it up that way for a shared
writable mapping, so the handle_mm_fault() call in __get_user_pages()
is never reached.
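To make that concrete, the relevant part of __get_user_pages() looks
roughly like this (simplified paraphrase of mm/memory.c from this era,
error handling elided):

	while (!(page = follow_page(vma, start, foll_flags))) {
		/* Only reached when follow_page() returns NULL.  For the
		 * futex page the pte is present and the mapping is writable,
		 * so follow_page(FOLL_WRITE) returns the page and
		 * handle_mm_fault() is never called; the pte never becomes
		 * dirty and TLB.SW is never set on e500.
		 */
		ret = handle_mm_fault(mm, vma, start,
				      (foll_flags & FOLL_WRITE) ?
				      FAULT_FLAG_WRITE : 0);
		/* fault retry / error handling elided */
	}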

The code above can do the job because its direct call to
handle_mm_fault() sets the pte dirty, via [do_anonymous_page(), mm/memory.c]:

	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

Thanks
Shan Hai

+                       flush_tlb_page(vma, address);
+               }
+       }
+#endif
+}
+
  #endif /* __KERNEL__ */
  #endif /* __ASM_POWERPC_TLB_H */
