From: Matthew Wilcox <mawil...@microsoft.com>

This is essentially xa_cmpxchg() with the locking handled above us,
and it doesn't have to handle replacing a NULL entry.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/truncate.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/truncate.c b/mm/truncate.c
index 69bb743dd7e5..70323c347298 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -33,15 +33,12 @@
 static inline void __clear_shadow_entry(struct address_space *mapping,
                                pgoff_t index, void *entry)
 {
-       struct radix_tree_node *node;
-       void **slot;
+       XA_STATE(xas, &mapping->pages, index);
 
-       if (!__radix_tree_lookup(&mapping->pages, index, &node, &slot))
+       xas_set_update(&xas, workingset_update_node);
+       if (xas_load(&xas) != entry)
                return;
-       if (*slot != entry)
-               return;
-       __radix_tree_replace(&mapping->pages, node, slot, NULL,
-                            workingset_update_node);
+       xas_store(&xas, NULL);
        mapping->nrexceptional--;
 }
 
@@ -746,10 +743,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                index++;
        }
        /*
-        * For DAX we invalidate page tables after invalidating radix tree.  We
+        * For DAX we invalidate page tables after invalidating page cache.  We
         * could invalidate page tables while invalidating each entry however
         * that would be expensive. And doing range unmapping before doesn't
-        * work as we have no cheap way to find whether radix tree entry didn't
+        * work as we have no cheap way to find whether page cache entry didn't
         * get remapped later.
         */
        if (dax_mapping(mapping)) {
-- 
2.15.0

--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to