Currently the PTE gets updated in wp_pfn_shared() after
dax_pfn_mkwrite() has released the corresponding radix tree entry lock.
When we want to write-protect PTEs on cache flush, the PTE modification
needs to happen under the radix tree entry lock so that updates of the
PTE and the radix tree stay consistent (standard faults use the page
lock to ensure this consistency). So move the PTE update into
dax_pfn_mkwrite().
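
For illustration only (not part of the patch): a minimal userspace
sketch of the ordering change, with a pthread mutex standing in for the
radix tree entry lock and two plain flags standing in for the radix
tree dirty tag and the PTE write/dirty bit. All names in the sketch
(mkwrite_fault_old(), mkwrite_fault_new(), the variables) are made up
for the example.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins, hypothetical and for illustration only. */
static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ radix tree entry lock */
static int radix_dirty_tag;     /* ~ PAGECACHE_TAG_DIRTY on the entry */
static int pte_writable;        /* ~ write/dirty bit in the PTE */

/* Old ordering: the PTE was made writable after the lock was dropped. */
static void mkwrite_fault_old(void)
{
        pthread_mutex_lock(&entry_lock);
        radix_dirty_tag = 1;
        pthread_mutex_unlock(&entry_lock);
        /*
         * Window: cache-flush code that write-protects PTEs under the
         * lock could run here, and the update below would then make the
         * PTE writable again behind its back.
         */
        pte_writable = 1;
}

/* New ordering: both updates sit inside one critical section. */
static void mkwrite_fault_new(void)
{
        pthread_mutex_lock(&entry_lock);
        radix_dirty_tag = 1;
        pte_writable = 1;       /* what finish_mkwrite_fault() does for us */
        pthread_mutex_unlock(&entry_lock);
}

int main(void)
{
        mkwrite_fault_old();
        mkwrite_fault_new();
        printf("tag=%d writable=%d\n", radix_dirty_tag, pte_writable);
        return 0;
}

In the patch itself this corresponds to calling finish_mkwrite_fault()
from dax_pfn_mkwrite() before put_unlocked_mapping_entry() and the
tree_lock unlock, and having wp_pfn_shared() return early on
VM_FAULT_NOPAGE so it does not touch the PTE again outside the lock.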

Signed-off-by: Jan Kara <[email protected]>
---
 fs/dax.c    | 6 ++++++
 mm/memory.c | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/fs/dax.c b/fs/dax.c
index 513881431be6..e8d61ac3d148 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1218,6 +1218,12 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (!entry || !radix_tree_exceptional_entry(entry))
                goto out;
        radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
+       /*
+        * If we race with somebody updating the PTE and finish_mkwrite_fault()
+        * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
+        * the fault in either case.
+        */
+       finish_mkwrite_fault(vma, vmf);
        put_unlocked_mapping_entry(mapping, index, entry);
 out:
        spin_unlock_irq(&mapping->tree_lock);
diff --git a/mm/memory.c b/mm/memory.c
index 30cf7b36df48..47241c2f6178 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2315,7 +2315,7 @@ static int wp_pfn_shared(struct mm_struct *mm,
                         linear_page_index(vma, address),
                         FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE, orig_pte);
                ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
-               if (ret & VM_FAULT_ERROR)
+               if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
                        return ret;
                if (finish_mkwrite_fault(vma, &vmf) < 0)
                        return 0;
-- 
2.6.6
