[PATCH v9 50/61] memfd: Convert shmem_tag_pins to XArray

2018-03-13 Thread Matthew Wilcox
From: Matthew Wilcox 

Simplify the locking by taking the spinlock while we walk the tree on
the assumption that many acquires and releases of the lock will be
worse than holding the lock for a (potentially) long time.

We could replicate the same locking behaviour with the xarray, but would
have to be careful that the xa_node wasn't RCU-freed under us before we
took the lock.

Signed-off-by: Matthew Wilcox 
---
 mm/memfd.c | 43 ++++++++++++++++++-------------------------
 1 file changed, 18 insertions(+), 25 deletions(-)
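
Not part of the patch: a minimal sketch of the locking pattern the commit
message describes, for readers unfamiliar with the XArray iteration API.
It takes the xa_lock once, walks the array under it, and every
XA_CHECK_SCHED entries pauses the iterator, drops the lock and
reschedules.  The function name walk_and_tag_pinned() is illustrative
only; the real conversion is in the diff below.

static void walk_and_tag_pinned(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	struct page *page;
	unsigned int tagged = 0;

	xas_lock_irq(&xas);			/* hold the lock across the whole walk */
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xa_is_value(page))		/* skip value (non-page) entries */
			continue;

		/* per-entry work (e.g. tagging pinned pages) happens here, under the lock */

		if (++tagged % XA_CHECK_SCHED)
			continue;

		/*
		 * xas_pause() drops the iterator's reference to the current
		 * xa_node, so the walk can safely restart from the next index
		 * after the lock has been released for cond_resched().
		 */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}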

diff --git a/mm/memfd.c b/mm/memfd.c
index 4cf7401cb09c..3b299d72df78 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -21,7 +21,7 @@
 #include 
 
 /*
- * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
+ * We need a tag: a new tag would expand every xa_node by 8 bytes,
  * so reuse a tag which we firmly believe is never set or cleared on shmem.
  */
 #define SHMEM_TAG_PINNED	PAGECACHE_TAG_TOWRITE
@@ -29,35 +29,28 @@
 
 static void shmem_tag_pins(struct address_space *mapping)
 {
-   struct radix_tree_iter iter;
-   void __rcu **slot;
-   pgoff_t start;
+   XA_STATE(xas, &mapping->i_pages, 0);
struct page *page;
+   unsigned int tagged = 0;
 
lru_add_drain();
-   start = 0;
-   rcu_read_lock();
-
-   radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-   page = radix_tree_deref_slot(slot);
-   if (!page || radix_tree_exception(page)) {
-   if (radix_tree_deref_retry(page)) {
-   slot = radix_tree_iter_retry(&iter);
-   continue;
-   }
-   } else if (page_count(page) - page_mapcount(page) > 1) {
-   xa_lock_irq(&mapping->i_pages);
-   radix_tree_tag_set(&mapping->i_pages, iter.index,
-  SHMEM_TAG_PINNED);
-   xa_unlock_irq(&mapping->i_pages);
-   }
 
-   if (need_resched()) {
-   slot = radix_tree_iter_resume(slot, &iter);
-   cond_resched_rcu();
-   }
+   xas_lock_irq(&xas);
+   xas_for_each(&xas, page, ULONG_MAX) {
+   if (xa_is_value(page))
+   continue;
+   if (page_count(page) - page_mapcount(page) > 1)
+   xas_set_tag(&xas, SHMEM_TAG_PINNED);
+
+   if (++tagged % XA_CHECK_SCHED)
+   continue;
+
+   xas_pause(&xas);
+   xas_unlock_irq(&xas);
+   cond_resched();
+   xas_lock_irq(&xas);
}
-   rcu_read_unlock();
+   xas_unlock_irq(&xas);
 }
 
 /*
-- 
2.16.1


